1 """Python wrappers around TensorFlow ops.
   2 
   3 This file is MACHINE GENERATED! Do not edit.
   4 Original C++ source file: array_ops.cc
   5 """
   6 
   7 import collections as _collections
   8 import six as _six
   9 
  10 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
  11 from tensorflow.python.eager import context as _context
  12 from tensorflow.python.eager import core as _core
  13 from tensorflow.python.eager import execute as _execute
  14 from tensorflow.python.framework import dtypes as _dtypes
  15 from tensorflow.python.framework import errors as _errors
  16 from tensorflow.python.framework import tensor_shape as _tensor_shape
  17 
  18 from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
  19 # Needed to trigger the call to _set_call_cpp_shape_fn.
  20 from tensorflow.python.framework import common_shapes as _common_shapes
  21 from tensorflow.python.framework import op_def_registry as _op_def_registry
  22 from tensorflow.python.framework import ops as _ops
  23 from tensorflow.python.framework import op_def_library as _op_def_library
  24 from tensorflow.python.util.deprecation import deprecated_endpoints
  25 from tensorflow.python.util.tf_export import tf_export
  26 
  27 
def batch_matrix_band_part(input, num_lower, num_upper, name=None):
  r"""Applies the `BatchMatrixBandPart` op to `input`.

  Args:
    input: A `Tensor`.
    num_lower: A `Tensor` of type `int64`.
    num_upper: A `Tensor` of type `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixBandPart", input=input, num_lower=num_lower,
        num_upper=num_upper, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchMatrixBandPart", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchMatrixBandPart", name, _ctx._post_execution_callbacks, input,
        num_lower, num_upper)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_matrix_band_part_eager_fallback(
          input, num_lower, num_upper, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  69 
  70 
def batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchMatrixBandPart`.

  Used when the fast-path execution of batch_matrix_band_part raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the T attr from `input` and convert it to an eager tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  num_lower = _ops.convert_to_tensor(num_lower, _dtypes.int64)
  num_upper = _ops.convert_to_tensor(num_upper, _dtypes.int64)
  _inputs_flat = [input, num_lower, num_upper]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchMatrixBandPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixBandPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
  87 
  88 
def batch_matrix_diag(diagonal, name=None):
  r"""Applies the `BatchMatrixDiag` op to `diagonal`.

  Args:
    diagonal: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixDiag", diagonal=diagonal, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchMatrixDiag", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchMatrixDiag", name, _ctx._post_execution_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_matrix_diag_eager_fallback(
          diagonal, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 126 
 127 
def batch_matrix_diag_eager_fallback(diagonal, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchMatrixDiag`.

  Used when the fast-path execution of batch_matrix_diag raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the T attr from `diagonal` and convert it to an eager tensor.
  _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)
  _inputs_flat = [diagonal]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchMatrixDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 142 
 143 
def batch_matrix_diag_part(input, name=None):
  r"""Applies the `BatchMatrixDiagPart` op to `input`.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixDiagPart", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchMatrixDiagPart", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchMatrixDiagPart", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_matrix_diag_part_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 181 
 182 
def batch_matrix_diag_part_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchMatrixDiagPart`.

  Used when the fast-path execution of batch_matrix_diag_part raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the T attr from `input` and convert it to an eager tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchMatrixDiagPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixDiagPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 197 
 198 
def batch_matrix_set_diag(input, diagonal, name=None):
  r"""Applies the `BatchMatrixSetDiag` op to `input` and `diagonal`.

  Args:
    input: A `Tensor`.
    diagonal: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixSetDiag", input=input, diagonal=diagonal, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchMatrixSetDiag", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchMatrixSetDiag", name, _ctx._post_execution_callbacks, input,
        diagonal)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_matrix_set_diag_eager_fallback(
          input, diagonal, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 238 
 239 
def batch_matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchMatrixSetDiag`.

  Used when the fast-path execution of batch_matrix_set_diag raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer a common T attr for both inputs and convert them to eager tensors.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], _ctx)
  (input, diagonal) = _inputs_T
  _inputs_flat = [input, diagonal]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchMatrixSetDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixSetDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 255 
 256 
def batch_to_space(input, crops, block_size, name=None):
  r"""BatchToSpace for 4-D tensors of type T.

  This is a legacy version of the more general BatchToSpaceND.

  Rearranges (permutes) data from batch into blocks of spatial data, followed by
  cropping. This is the reverse transformation of SpaceToBatch. More specifically,
  this op outputs a copy of the input tensor where values from the `batch`
  dimension are moved in spatial blocks to the `height` and `width` dimensions,
  followed by cropping along the `height` and `width` dimensions.

  Args:
    input: A `Tensor`. 4-D tensor with shape
      `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
        depth]`. Note that the batch size of the input tensor must be divisible by
      `block_size * block_size`.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
      how many elements to crop from the intermediate result across the spatial
      dimensions as follows:

          crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
    block_size: An `int` that is `>= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate/coerce the attr to a Python int before building the op.
    block_size = _execute.make_int(block_size, "block_size")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchToSpace", input=input, crops=crops, block_size=block_size,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "block_size",
              _op.get_attr("block_size"), "Tidx", _op.get_attr("Tidx"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchToSpace", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchToSpace",
        name, _ctx._post_execution_callbacks, input, crops, "block_size",
        block_size)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_to_space_eager_fallback(
          input, crops, block_size=block_size, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 316 
 317 
def batch_to_space_eager_fallback(input, crops, block_size, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchToSpace`.

  Used when the fast-path execution of batch_to_space raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  block_size = _execute.make_int(block_size, "block_size")
  # Infer the T attr from `input`; `crops` defaults to int32 if ambiguous.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)
  _inputs_flat = [input, crops]
  _attrs = ("T", _attr_T, "block_size", block_size, "Tidx", _attr_Tidx)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchToSpace", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchToSpace", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 334 
 335 
@tf_export('batch_to_space_nd', 'manip.batch_to_space_nd')
@deprecated_endpoints('manip.batch_to_space_nd')
def batch_to_space_nd(input, block_shape, crops, name=None):
  r"""BatchToSpace for N-D tensors of type T.

  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
  `block_shape + [batch]`, interleaves these blocks back into the grid defined by
  the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
  the input.  The spatial dimensions of this intermediate result are then
  optionally cropped according to `crops` to produce the output.  This is the
  reverse of SpaceToBatch.  See below for a precise description.

  Args:
    input: A `Tensor`.
      N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
      where spatial_shape has M dimensions.
    block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with shape `[M]`, all values must be >= 1.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D with shape `[M, 2]`, all values must be >= 0.
        `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
        dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
        required that
        `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

      This operation is equivalent to the following steps:

      1. Reshape `input` to `reshaped` of shape:
           [block_shape[0], ..., block_shape[M-1],
            batch / prod(block_shape),
            input_shape[1], ..., input_shape[N-1]]

      2. Permute dimensions of `reshaped` to produce `permuted` of shape
           [batch / prod(block_shape),

            input_shape[1], block_shape[0],
            ...,
            input_shape[M], block_shape[M-1],

            input_shape[M+1], ..., input_shape[N-1]]

      3. Reshape `permuted` to produce `reshaped_permuted` of shape
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0],
            ...,
            input_shape[M] * block_shape[M-1],

            input_shape[M+1],
            ...,
            input_shape[N-1]]

      4. Crop the start and end of dimensions `[1, ..., M]` of
         `reshaped_permuted` according to `crops` to produce the output of shape:
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
            ...,
            input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

            input_shape[M+1], ..., input_shape[N-1]]

      Some examples:

      (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```
      x = [[[[1], [2]], [[3], [4]]]]
      ```

      (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
      ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```
      x = [[[[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]]]]
      ```

      (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1], [3]], [[9], [11]]],
           [[[2], [4]], [[10], [12]]],
           [[[5], [7]], [[13], [15]]],
           [[[6], [8]], [[14], [16]]]]
      ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```
      x = [[[1],   [2],  [3],  [4]],
           [[5],   [6],  [7],  [8]],
           [[9],  [10], [11],  [12]],
           [[13], [14], [15],  [16]]]
      ```

      (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [2, 0]]`:

      ```
      x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
           [[[0], [2], [4]]], [[[0], [10], [12]]],
           [[[0], [5], [7]]], [[[0], [13], [15]]],
           [[[0], [6], [8]]], [[[0], [14], [16]]]]
      ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]]],
           [[[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchToSpaceND", input=input, block_shape=block_shape, crops=crops,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tblock_shape",
              _op.get_attr("Tblock_shape"), "Tcrops", _op.get_attr("Tcrops"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BatchToSpaceND", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchToSpaceND", name, _ctx._post_execution_callbacks, input,
        block_shape, crops)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return batch_to_space_nd_eager_fallback(
          input, block_shape, crops, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 499 
 500 
def batch_to_space_nd_eager_fallback(input, block_shape, crops, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchToSpaceND`.

  Used when the fast-path execution of batch_to_space_nd raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer T from `input`; index-like inputs default to int32 if ambiguous.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], _ctx, _dtypes.int32)
  _attr_Tcrops, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)
  _inputs_flat = [input, block_shape, crops]
  _attrs = ("T", _attr_T, "Tblock_shape", _attr_Tblock_shape, "Tcrops",
  _attr_Tcrops)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BatchToSpaceND", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchToSpaceND", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 518 
 519 
@tf_export('bitcast')
def bitcast(input, type, name=None):
  r"""Bitcasts a tensor from one type to another without copying data.

  Given a tensor `input`, this operation returns a tensor that has the same buffer
  data as `input` with datatype `type`.

  If the input datatype `T` is larger than the output datatype `type` then the
  shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].

  If `T` is smaller than `type`, the operator requires that the rightmost
  dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
  [..., sizeof(`type`)/sizeof(`T`)] to [...].

  *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
  endian orderings will give different results.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.
    type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `type`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate/coerce the attr to a DType enum before building the op.
    type = _execute.make_type(type, "type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bitcast", input=input, type=type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "type", _op.get_attr("type"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "Bitcast", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bitcast",
        name, _ctx._post_execution_callbacks, input, "type", type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return bitcast_eager_fallback(
          input, type=type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 573 
 574 
def bitcast_eager_fallback(input, type, name=None, ctx=None):
  r"""Slow-path eager execution of `Bitcast`.

  Used when the fast-path execution of bitcast raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  type = _execute.make_type(type, "type")
  # Infer the T attr from `input` and convert it to an eager tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "type", type)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"Bitcast", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Bitcast", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 590 
 591 
def broadcast_args(s0, s1, name=None):
  r"""Return the shape of s0 op s1 with broadcast.

  Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
  broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.

  Args:
    s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    s1: A `Tensor`. Must have the same type as `s0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `s0`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastArgs", s0=s0, s1=s1, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BroadcastArgs", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BroadcastArgs", name, _ctx._post_execution_callbacks, s0, s1)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return broadcast_args_eager_fallback(
          s0, s1, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 633 
 634 
def broadcast_args_eager_fallback(s0, s1, name=None, ctx=None):
  r"""Slow-path eager execution of `BroadcastArgs`.

  Used when the fast-path execution of broadcast_args raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer a common T attr for both inputs; defaults to int32 if ambiguous.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces exactly one output.
  _result = _execute.execute(b"BroadcastArgs", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BroadcastArgs", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
 650 
 651 
# Field names for the two outputs of the BroadcastGradientArgs op; the
# namedtuple lets callers access them as `.r0` / `.r1` as well as by index.
_broadcast_gradient_args_outputs = ["r0", "r1"]
_BroadcastGradientArgsOutput = _collections.namedtuple(
    "BroadcastGradientArgs", _broadcast_gradient_args_outputs)
 655 
 656 
def broadcast_gradient_args(s0, s1, name=None):
  r"""Return the reduction indices for computing gradients of s0 op s1 with broadcast.

  This is typically used by gradient computations for a broadcasting operation.

  Args:
    s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    s1: A `Tensor`. Must have the same type as `s0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (r0, r1).

    r0: A `Tensor`. Has the same type as `s0`.
    r1: A `Tensor`. Has the same type as `s0`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastGradientArgs", s0=s0, s1=s1, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BroadcastGradientArgs", _inputs_flat, _attrs, _result, name)
    # Wrap the two outputs in a namedtuple (r0, r1).
    _result = _BroadcastGradientArgsOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BroadcastGradientArgs", name, _ctx._post_execution_callbacks, s0, s1)
      _result = _BroadcastGradientArgsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return broadcast_gradient_args_eager_fallback(
          s0, s1, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 701 
 702 
def broadcast_gradient_args_eager_fallback(s0, s1, name=None, ctx=None):
  r"""Slow-path eager execution of `BroadcastGradientArgs`.

  Used when the fast-path execution of broadcast_gradient_args raises
  `_FallbackException`. Same arguments and return value, plus an
  optional eager `ctx`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer a common T attr for both inputs; defaults to int32 if ambiguous.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  # Execute eagerly; the op produces two outputs (r0, r1).
  _result = _execute.execute(b"BroadcastGradientArgs", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BroadcastGradientArgs", _inputs_flat, _attrs, _result, name)
  _result = _BroadcastGradientArgsOutput._make(_result)
  return _result
 718 
 719 
@tf_export('broadcast_to')
def broadcast_to(input, shape, name=None):
  r"""Broadcast an array for a compatible shape.

  Broadcasting is the process of making arrays to have compatible shapes
  for arithmetic operations. Two shapes are compatible if for each
  dimension pair they are either equal or one of them is one. When trying
  to broadcast a Tensor to a shape, it starts with the trailing dimensions,
  and works its way forward.

  For example,
  ```
  >>> x = tf.constant([1, 2, 3])
  >>> y = tf.broadcast_to(x, [3, 3])
  >>> sess.run(y)
  array([[1, 2, 3],
         [1, 2, 3],
         [1, 2, 3]], dtype=int32)
  ```
  In the above example, the input Tensor with the shape of `[1, 3]`
  is broadcasted to output Tensor with shape of `[3, 3]`.

  Args:
    input: A `Tensor`. A Tensor to broadcast.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An 1-D `int` Tensor. The shape of the desired output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): build the op into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastTo", input=input, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    # Record inputs/attrs so gradients can be computed for this op.
    _execute.record_gradient(
      "BroadcastTo", _inputs_flat, _attrs, _result, name)
    _result, = _result  # The op has exactly one output.
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BroadcastTo",
        name, _ctx._post_execution_callbacks, input, shape)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return broadcast_to_eager_fallback(
          input, shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, appending the
      # op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 778 
 779 
 780 def broadcast_to_eager_fallback(input, shape, name=None, ctx=None):
 781   r"""This is the slowpath function for Eager mode.
 782   This is for function broadcast_to
 783   """
 784   _ctx = ctx if ctx else _context.context()
 785   _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
 786   _attr_Tidx, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
 787   _inputs_flat = [input, shape]
 788   _attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
 789   _result = _execute.execute(b"BroadcastTo", 1, inputs=_inputs_flat,
 790                              attrs=_attrs, ctx=_ctx, name=name)
 791   _execute.record_gradient(
 792       "BroadcastTo", _inputs_flat, _attrs, _result, name)
 793   _result, = _result
 794   return _result
 795 
 796 
@tf_export('debugging.check_numerics', 'check_numerics')
@deprecated_endpoints('check_numerics')
def check_numerics(tensor, message, name=None):
  r"""Checks a tensor for NaN and Inf values.

  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.

  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate the attr and build a CheckNumerics node.
    message = _execute.make_str(message, "message")
    _, _, _op = _op_def_lib._apply_op_helper(
        "CheckNumerics", tensor=tensor, message=message, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "message", _op.get_attr("message"))
    _execute.record_gradient(
      "CheckNumerics", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "CheckNumerics", name, _ctx._post_execution_callbacks, tensor,
        "message", message)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return check_numerics_eager_fallback(
          tensor, message=message, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # NOTE: this rebinds the `message` parameter with the error text;
      # harmless here since the parameter is not read afterwards.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 842 
 843 
 844 def check_numerics_eager_fallback(tensor, message, name=None, ctx=None):
 845   r"""This is the slowpath function for Eager mode.
 846   This is for function check_numerics
 847   """
 848   _ctx = ctx if ctx else _context.context()
 849   message = _execute.make_str(message, "message")
 850   _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
 851   _inputs_flat = [tensor]
 852   _attrs = ("T", _attr_T, "message", message)
 853   _result = _execute.execute(b"CheckNumerics", 1, inputs=_inputs_flat,
 854                              attrs=_attrs, ctx=_ctx, name=name)
 855   _execute.record_gradient(
 856       "CheckNumerics", _inputs_flat, _attrs, _result, name)
 857   _result, = _result
 858   return _result
 859 
 860 
def concat(concat_dim, values, name=None):
  r"""Concatenates tensors along one dimension.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      0-D.  The dimension along which to concatenate.  Must be in the
      range [0, rank(values)).
    values: A list of at least 2 `Tensor` objects with the same type.
      The `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.

  Raises:
    TypeError: If `values` is not a list or tuple.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate inputs and build a Concat node.
    if not isinstance(values, (list, tuple)):
      raise TypeError(
          "Expected list for 'values' argument to "
          "'concat' Op, not %r." % values)
    _attr_N = len(values)  # computed for validation; attrs below come from the op
    _, _, _op = _op_def_lib._apply_op_helper(
        "Concat", concat_dim=concat_dim, values=values, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "Concat", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Concat", name,
        _ctx._post_execution_callbacks, concat_dim, values)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return concat_eager_fallback(
          concat_dim, values, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 908 
 909 
 910 def concat_eager_fallback(concat_dim, values, name=None, ctx=None):
 911   r"""This is the slowpath function for Eager mode.
 912   This is for function concat
 913   """
 914   _ctx = ctx if ctx else _context.context()
 915   if not isinstance(values, (list, tuple)):
 916     raise TypeError(
 917         "Expected list for 'values' argument to "
 918         "'concat' Op, not %r." % values)
 919   _attr_N = len(values)
 920   _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
 921   concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
 922   _inputs_flat = [concat_dim] + list(values)
 923   _attrs = ("N", _attr_N, "T", _attr_T)
 924   _result = _execute.execute(b"Concat", 1, inputs=_inputs_flat, attrs=_attrs,
 925                              ctx=_ctx, name=name)
 926   _execute.record_gradient(
 927       "Concat", _inputs_flat, _attrs, _result, name)
 928   _result, = _result
 929   return _result
 930 
 931 
def concat_offset(concat_dim, shape, name=None):
  r"""Computes offsets of concat inputs within its output.

  For example:

  ```
  # 'x' is [2, 2, 7]
  # 'y' is [2, 3, 7]
  # 'z' is [2, 5, 7]
  concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
  ```

  This is typically used by gradient computations for a concat operation.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      The dimension along which to concatenate.
    shape: A list of at least 2 `Tensor` objects with type `int32`.
      The `N` int32 vectors representing shape of tensors being concatenated.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `shape` of `Tensor` objects with type `int32`.

  Raises:
    TypeError: If `shape` is not a list or tuple.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate inputs and build a ConcatOffset node.
    if not isinstance(shape, (list, tuple)):
      raise TypeError(
          "Expected list for 'shape' argument to "
          "'concat_offset' Op, not %r." % shape)
    _attr_N = len(shape)  # computed for validation; attrs below come from the op
    _, _, _op = _op_def_lib._apply_op_helper(
        "ConcatOffset", concat_dim=concat_dim, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"))
    _execute.record_gradient(
      "ConcatOffset", _inputs_flat, _attrs, _result, name)
    # Multi-output op: return the full list of offset tensors.
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ConcatOffset",
        name, _ctx._post_execution_callbacks, concat_dim, shape)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return concat_offset_eager_fallback(
          concat_dim, shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 987 
 988 
 989 def concat_offset_eager_fallback(concat_dim, shape, name=None, ctx=None):
 990   r"""This is the slowpath function for Eager mode.
 991   This is for function concat_offset
 992   """
 993   _ctx = ctx if ctx else _context.context()
 994   if not isinstance(shape, (list, tuple)):
 995     raise TypeError(
 996         "Expected list for 'shape' argument to "
 997         "'concat_offset' Op, not %r." % shape)
 998   _attr_N = len(shape)
 999   concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
1000   shape = _ops.convert_n_to_tensor(shape, _dtypes.int32)
1001   _inputs_flat = [concat_dim] + list(shape)
1002   _attrs = ("N", _attr_N)
1003   _result = _execute.execute(b"ConcatOffset", _attr_N, inputs=_inputs_flat,
1004                              attrs=_attrs, ctx=_ctx, name=name)
1005   _execute.record_gradient(
1006       "ConcatOffset", _inputs_flat, _attrs, _result, name)
1007   return _result
1008 
1009 
def concat_v2(values, axis, name=None):
  r"""Concatenates tensors along one dimension.

  Args:
    values: A list of at least 2 `Tensor` objects with the same type.
      List of `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D.  The dimension along which to concatenate.  Must be in the
      range [-rank(values), rank(values)).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.

  Raises:
    TypeError: If `values` is not a list or tuple.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate inputs and build a ConcatV2 node.
    if not isinstance(values, (list, tuple)):
      raise TypeError(
          "Expected list for 'values' argument to "
          "'concat_v2' Op, not %r." % values)
    _attr_N = len(values)  # computed for validation; attrs below come from the op
    _, _, _op = _op_def_lib._apply_op_helper(
        "ConcatV2", values=values, axis=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
      "ConcatV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ConcatV2",
        name, _ctx._post_execution_callbacks, values, axis)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return concat_v2_eager_fallback(
          values, axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1058 
1059 
def concat_v2_eager_fallback(values, axis, name=None, ctx=None):
  r"""Slow-path eager executor for concat_v2.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat_v2' Op, not %r." % values)
  attr_n = len(values)
  attr_t, values = _execute.args_to_matching_eager(list(values), eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager(
      [axis], eager_ctx, _dtypes.int32)
  flat_inputs = list(values) + [axis]
  op_attrs = ("N", attr_n, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"ConcatV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("ConcatV2", flat_inputs, op_attrs, outputs, name)
  out, = outputs
  return out
1080 
1081 
def conjugate_transpose(x, perm, name=None):
  r"""Shuffle dimensions of x according to a permutation and conjugate the result.

  The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
    `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`

  Args:
    x: A `Tensor`.
    perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a ConjugateTranspose node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ConjugateTranspose", x=x, perm=perm, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tperm", _op.get_attr("Tperm"))
    _execute.record_gradient(
      "ConjugateTranspose", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ConjugateTranspose", name, _ctx._post_execution_callbacks, x, perm)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return conjugate_transpose_eager_fallback(
          x, perm, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1124 
1125 
def conjugate_transpose_eager_fallback(x, perm, name=None, ctx=None):
  r"""Slow-path eager executor for conjugate_transpose.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  attr_tperm, (perm,) = _execute.args_to_matching_eager(
      [perm], eager_ctx, _dtypes.int32)
  flat_inputs = [x, perm]
  op_attrs = ("T", attr_t, "Tperm", attr_tperm)
  outputs = _execute.execute(b"ConjugateTranspose", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("ConjugateTranspose", flat_inputs, op_attrs,
                           outputs, name)
  out, = outputs
  return out
1141 
1142 
def const(value, dtype, name=None):
  r"""Returns a constant tensor.

  Args:
    value: A `tf.TensorProto`. Attr `value` is the tensor to return.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize the attrs and build a Const node.
    value = _execute.make_tensor(value, "value")
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Const", value=value, dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("value", _op.get_attr("value"), "dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "Const", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Const", name,
        _ctx._post_execution_callbacks, "value", value, "dtype", dtype)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return const_eager_fallback(
          value=value, dtype=dtype, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1183 
1184 
def const_eager_fallback(value, dtype, name=None, ctx=None):
  r"""Slow-path eager executor for const.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  value = _execute.make_tensor(value, "value")
  dtype = _execute.make_type(dtype, "dtype")
  # Const takes no tensor inputs; everything is carried in the attrs.
  flat_inputs = []
  op_attrs = ("value", value, "dtype", dtype)
  outputs = _execute.execute(b"Const", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Const", flat_inputs, op_attrs, outputs, name)
  out, = outputs
  return out
1200 
1201 
def debug_gradient_identity(input, name=None):
  r"""Identity op for gradient debugging.

  This op is hidden from public in Python. It is used by TensorFlow Debugger to
  register gradient tensors for gradient debugging.
  This op operates on non-reference-type tensors.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a DebugGradientIdentity node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "DebugGradientIdentity", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "DebugGradientIdentity", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DebugGradientIdentity", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return debug_gradient_identity_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1243 
1244 
def debug_gradient_identity_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager executor for debug_gradient_identity.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"DebugGradientIdentity", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("DebugGradientIdentity", flat_inputs, op_attrs,
                           outputs, name)
  out, = outputs
  return out
1259 
1260 
def debug_gradient_ref_identity(input, name=None):
  r"""Identity op for gradient debugging.

  This op is hidden from public in Python. It is used by TensorFlow Debugger to
  register gradient tensors for gradient debugging.
  This op operates on reference-type tensors.

  Args:
    input: A mutable `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `input`.

  Raises:
    RuntimeError: If called with eager execution enabled; ref-type outputs
      are only supported in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a DebugGradientRefIdentity node and record gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "DebugGradientRefIdentity", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "DebugGradientRefIdentity", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-type outputs cannot be represented as eager tensors.
    # (A duplicated, unreachable copy of this raise after the if/else was
    # removed: both branches already return or raise.)
    raise RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.")
1292 
def deep_copy(x, name=None):
  r"""Makes a copy of `x`.

  Args:
    x: A `Tensor`. The source tensor of type `T`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a DeepCopy node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "DeepCopy", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "DeepCopy", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DeepCopy",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return deep_copy_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1330 
1331 
def deep_copy_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for deep_copy.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"DeepCopy", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("DeepCopy", flat_inputs, op_attrs, outputs, name)
  out, = outputs
  return out
1346 
1347 
def depth_to_space(input, block_size, data_format="NHWC", name=None):
  r"""DepthToSpace for tensors of type T.

  Rearranges data from depth into blocks of spatial data.
  This is the reverse transformation of SpaceToDepth. More specifically,
  this op outputs a copy of the input tensor where values from the `depth`
  dimension are moved in spatial blocks to the `height` and `width` dimensions.
  The attr `block_size` indicates the input block size and how the data is moved.

    * Chunks of data of size `block_size * block_size` from depth are rearranged
      into non-overlapping blocks of size `block_size x block_size`
    * The width the output tensor is `input_depth * block_size`, whereas the
      height is `input_height * block_size`.
    * The Y, X coordinates within each block of the output image are determined
      by the high order component of the input channel index.
    * The depth of the input tensor must be divisible by
      `block_size * block_size`.

  The `data_format` attr specifies the layout of the input and output tensors
  with the following options:
    "NHWC": `[ batch, height, width, channels ]`
    "NCHW": `[ batch, channels, height, width ]`
    "NCHW_VECT_C":
        `qint8 [ batch, channels / 4, height, width, 4 ]`

  It is useful to consider the operation as transforming a 6-D Tensor.
  e.g. for data_format = NHWC,
       Each element in the input tensor can be specified via 6 coordinates,
       ordered by decreasing memory layout significance as:
       n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
                          within the input image, bX, bY means coordinates
                          within the output block, oC means output channels).
       The output would be the input transposed to the following layout:
       n,iY,bY,iX,bX,oC

  This operation is useful for resizing the activations between convolutions
  (but keeping all data), e.g. instead of pooling. It is also useful for training
  purely convolutional models.

  For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
  block_size = 2:

  ```
  x = [[[[1, 2, 3, 4]]]]

  ```

  This operation will output a tensor of shape `[1, 2, 2, 1]`:

  ```
     [[[[1], [2]],
       [[3], [4]]]]
  ```

  Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
  the corresponding output will have 2x2 elements and will have a depth of
  1 channel (1 = `4 / (block_size * block_size)`).
  The output element shape is `[2, 2, 1]`.

  For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

  ```
  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
  ```

  This operation, for block size of 2, will return the following tensor of shape
  `[1, 2, 2, 3]`

  ```
     [[[[1, 2, 3], [4, 5, 6]],
       [[7, 8, 9], [10, 11, 12]]]]

  ```

  Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

  ```
  x =  [[[[1, 2, 3, 4],
         [5, 6, 7, 8]],
        [[9, 10, 11, 12],
         [13, 14, 15, 16]]]]
  ```

  the operator will return the following tensor of shape `[1 4 4 1]`:

  ```
  x = [[[ [1],   [2],  [5],  [6]],
        [ [3],   [4],  [7],  [8]],
        [ [9],  [10], [13],  [14]],
        [ [11], [12], [15],  [16]]]]

  ```

  Args:
    input: A `Tensor`.
    block_size: An `int` that is `>= 2`.
      The size of the spatial block, same as in Space2Depth.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize the attrs and build a DepthToSpace node.
    block_size = _execute.make_int(block_size, "block_size")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthToSpace", input=input, block_size=block_size,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "block_size",
              _op.get_attr("block_size"), "data_format",
              _op.get_attr("data_format"))
    _execute.record_gradient(
      "DepthToSpace", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DepthToSpace",
        name, _ctx._post_execution_callbacks, input, "block_size", block_size,
        "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path declined these inputs; use the Python slow path.
      return depth_to_space_eager_fallback(
          input, block_size=block_size, data_format=data_format, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1487 
1488 
def depth_to_space_eager_fallback(input, block_size, data_format="NHWC", name=None, ctx=None):
  r"""Slow-path eager executor for depth_to_space.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  block_size = _execute.make_int(block_size, "block_size")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "block_size", block_size, "data_format",
              data_format)
  outputs = _execute.execute(b"DepthToSpace", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("DepthToSpace", flat_inputs, op_attrs, outputs,
                           name)
  out, = outputs
  return out
1508 
1509 
@tf_export('quantization.dequantize', 'dequantize')
@deprecated_endpoints('dequantize')
def dequantize(input, min_range, max_range, mode="MIN_COMBINED", name=None):
  r"""Dequantize the 'input' tensor into a float Tensor.

  [min_range, max_range] are scalar floats that specify the range for
  the 'input' data. The 'mode' attribute controls exactly which calculations are
  used to convert the float values to their quantized equivalents.

  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

  ```
  if T == qint8, in[i] += (range(T) + 1)/ 2.0
  out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
  ```
  here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

  *MIN_COMBINED Mode Example*

  If the input comes from a QuantizedRelu6, the output type is
  quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
  0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
  Dequantize on quint8 will take each value, cast to float, and multiply
  by 6 / 255.
  Note that if quantizedtype is qint8, the operation will additionally add
  each value by 128 prior to casting.

  If the mode is 'MIN_FIRST', then this approach is used:

  ```c++
  num_discrete_values = 1 << (# of bits in T)
  range_adjust = num_discrete_values / (num_discrete_values - 1)
  range = (range_max - range_min) * range_adjust
  range_scale = range / num_discrete_values
  const double offset_input = static_cast<double>(input) - lowest_quantized;
  result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
  ```

  *SCALED mode Example*

  `SCALED` mode matches the quantization approach used in
  `QuantizeAndDequantize{V2|V3}`.

  If the mode is `SCALED`, we do not use the full range of the output type,
  choosing to elide the lowest possible value for symmetry (e.g., output range is
  -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
  0.

  We first find the range of values in our tensor. The
  range we use is always centered on 0, so we find m such that
  ```c++
    m = max(abs(input_min), abs(input_max))
  ```

  Our input tensor range is then `[-m, m]`.

  Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
  If T is signed, this is
  ```
    num_bits = sizeof(T) * 8
    [min_fixed, max_fixed] =
        [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
  ```

  Otherwise, if T is unsigned, the fixed-point range is
  ```
    [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
  ```

  From this we compute our scaling factor, s:
  ```c++
    s = (2 * m) / (max_fixed - min_fixed)
  ```

  Now we can dequantize the elements of our tensor:
  ```c++
  result = input * s
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_range: A `Tensor` of type `float32`.
      The minimum scalar value possibly produced for the input.
    max_range: A `Tensor` of type `float32`.
      The maximum scalar value possibly produced for the input.
    mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build a symbolic Dequantize op.
    if mode is None:
      mode = "MIN_COMBINED"
    mode = _execute.make_str(mode, "mode")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Dequantize", input=input, min_range=min_range, max_range=max_range,
        mode=mode, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "mode", _op.get_attr("mode"))
    _execute.record_gradient(
      "Dequantize", _inputs_flat, _attrs, _result, name)
    # The op has a single output; unpack it.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path. Positional layout is
      # (handle, device, op type, op name, callbacks, inputs..., attr pairs...).
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Dequantize",
        name, _ctx._post_execution_callbacks, input, min_range, max_range,
        "mode", mode)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return dequantize_eager_fallback(
          input, min_range, max_range, mode=mode, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1633 
1634 
def dequantize_eager_fallback(input, min_range, max_range, mode="MIN_COMBINED", name=None, ctx=None):
  r"""Slow-path eager executor for the Dequantize op.

  Invoked when the C-level fast path raises _FallbackException; replicates
  the same attr normalization and execution from Python.
  """
  exec_ctx = ctx or _context.context()
  # Normalize the mode attr exactly as the fast path expects it.
  mode = _execute.make_str("MIN_COMBINED" if mode is None else mode, "mode")
  type_attr, (input,) = _execute.args_to_matching_eager([input], exec_ctx)
  # Range endpoints are fixed float32 inputs.
  min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)
  max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)
  flat_inputs = [input, min_range, max_range]
  op_attrs = ("T", type_attr, "mode", mode)
  outputs = _execute.execute(b"Dequantize", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("Dequantize", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1654 
1655 
@tf_export('linalg.tensor_diag', 'diag')
@deprecated_endpoints('diag')
def diag(diagonal, name=None):
  r"""Returns a diagonal tensor with a given diagonal values.

  Given a `diagonal`, this operation returns a tensor with the `diagonal` and
  everything else padded with zeros. The diagonal is computed as follows:

  Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
  rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

  `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

  For example:

  ```
  # 'diagonal' is [1, 2, 3, 4]
  tf.diag(diagonal) ==> [[1, 0, 0, 0]
                         [0, 2, 0, 0]
                         [0, 0, 3, 0]
                         [0, 0, 0, 4]]
  ```

  Args:
    diagonal: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
      Rank k tensor where k is at most 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a symbolic Diag op via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Diag", diagonal=diagonal, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Diag", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Diag", name,
        _ctx._post_execution_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return diag_eager_fallback(
          diagonal, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1714 
1715 
def diag_eager_fallback(diagonal, name=None, ctx=None):
  r"""Slow-path eager executor for the Diag op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  type_attr, (diagonal,) = _execute.args_to_matching_eager([diagonal],
                                                           exec_ctx)
  flat_inputs = [diagonal]
  op_attrs = ("T", type_attr)
  outputs = _execute.execute(b"Diag", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=exec_ctx, name=name)
  _execute.record_gradient("Diag", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1730 
1731 
@tf_export('linalg.tensor_diag_part', 'diag_part')
@deprecated_endpoints('diag_part')
def diag_part(input, name=None):
  r"""Returns the diagonal part of the tensor.

  This operation returns a tensor with the `diagonal` part
  of the `input`. The `diagonal` part is computed as follows:

  Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
  tensor of rank `k` with dimensions `[D1,..., Dk]` where:

  `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

  For example:

  ```
  # 'input' is [[1, 0, 0, 0]
                [0, 2, 0, 0]
                [0, 0, 3, 0]
                [0, 0, 0, 4]]

  tf.diag_part(input) ==> [1, 2, 3, 4]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
      Rank k tensor where k is even and not zero.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a symbolic DiagPart op via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "DiagPart", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "DiagPart", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DiagPart",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return diag_part_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1791 
1792 
def diag_part_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager executor for the DiagPart op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  type_attr, (input,) = _execute.args_to_matching_eager([input], exec_ctx)
  flat_inputs = [input]
  op_attrs = ("T", type_attr)
  outputs = _execute.execute(b"DiagPart", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("DiagPart", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1807 
1808 
def edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None):
  r"""Computes the (possibly normalized) Levenshtein Edit Distance.

  The inputs are variable-length sequences provided by SparseTensors
    (hypothesis_indices, hypothesis_values, hypothesis_shape)
  and
    (truth_indices, truth_values, truth_shape).

  The inputs are:

  Args:
    hypothesis_indices: A `Tensor` of type `int64`.
      The indices of the hypothesis list SparseTensor.
      This is an N x R int64 matrix.
    hypothesis_values: A `Tensor`.
      The values of the hypothesis list SparseTensor.
      This is an N-length vector.
    hypothesis_shape: A `Tensor` of type `int64`.
      The shape of the hypothesis list SparseTensor.
      This is an R-length vector.
    truth_indices: A `Tensor` of type `int64`.
      The indices of the truth list SparseTensor.
      This is an M x R int64 matrix.
    truth_values: A `Tensor`. Must have the same type as `hypothesis_values`.
      The values of the truth list SparseTensor.
      This is an M-length vector.
    truth_shape: A `Tensor` of type `int64`. truth indices, vector.
    normalize: An optional `bool`. Defaults to `True`.
      boolean (if true, edit distances are normalized by length of truth).

      The output is:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build a symbolic EditDistance op.
    if normalize is None:
      normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    _, _, _op = _op_def_lib._apply_op_helper(
        "EditDistance", hypothesis_indices=hypothesis_indices,
        hypothesis_values=hypothesis_values,
        hypothesis_shape=hypothesis_shape, truth_indices=truth_indices,
        truth_values=truth_values, truth_shape=truth_shape,
        normalize=normalize, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("normalize", _op.get_attr("normalize"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "EditDistance", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path; inputs first, then attr pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "EditDistance",
        name, _ctx._post_execution_callbacks, hypothesis_indices,
        hypothesis_values, hypothesis_shape, truth_indices, truth_values,
        truth_shape, "normalize", normalize)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return edit_distance_eager_fallback(
          hypothesis_indices, hypothesis_values, hypothesis_shape,
          truth_indices, truth_values, truth_shape, normalize=normalize,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1883 
1884 
def edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None, ctx=None):
  r"""Slow-path eager executor for the EditDistance op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  normalize = _execute.make_bool(True if normalize is None else normalize,
                                 "normalize")
  # The hypothesis and truth value vectors must share a single dtype T.
  type_attr, matched = _execute.args_to_matching_eager(
      [hypothesis_values, truth_values], exec_ctx)
  hypothesis_values, truth_values = matched
  # Indices and shapes of both SparseTensors are fixed int64 inputs.
  hypothesis_indices = _ops.convert_to_tensor(hypothesis_indices, _dtypes.int64)
  hypothesis_shape = _ops.convert_to_tensor(hypothesis_shape, _dtypes.int64)
  truth_indices = _ops.convert_to_tensor(truth_indices, _dtypes.int64)
  truth_shape = _ops.convert_to_tensor(truth_shape, _dtypes.int64)
  flat_inputs = [hypothesis_indices, hypothesis_values, hypothesis_shape,
                 truth_indices, truth_values, truth_shape]
  op_attrs = ("normalize", normalize, "T", type_attr)
  outputs = _execute.execute(b"EditDistance", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("EditDistance", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
1907 
1908 
def empty(shape, dtype, init=False, name=None):
  r"""Creates a tensor with the given shape.

This operation creates a tensor of `shape` and `dtype`.

  Args:
    shape: A `Tensor` of type `int32`.
      1-D. Represents the shape of the output tensor.
    dtype: A `tf.DType`.
    init: An optional `bool`. Defaults to `False`.
      If True, initialize the returned tensor with the default value of dtype.  Otherwise, the implementation is free not to initializethe tensor's content.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build a symbolic Empty op.
    dtype = _execute.make_type(dtype, "dtype")
    if init is None:
      init = False
    init = _execute.make_bool(init, "init")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Empty", shape=shape, dtype=dtype, init=init, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "init", _op.get_attr("init"))
    _execute.record_gradient(
      "Empty", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path; inputs first, then attr pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Empty", name,
        _ctx._post_execution_callbacks, shape, "dtype", dtype, "init", init)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return empty_eager_fallback(
          shape, dtype=dtype, init=init, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1956 
1957 
def empty_eager_fallback(shape, dtype, init=False, name=None, ctx=None):
  r"""Slow-path eager executor for the Empty op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  init = _execute.make_bool(False if init is None else init, "init")
  # The shape input is a fixed int32 vector.
  shape = _ops.convert_to_tensor(shape, _dtypes.int32)
  flat_inputs = [shape]
  op_attrs = ("dtype", dtype, "init", init)
  outputs = _execute.execute(b"Empty", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=exec_ctx, name=name)
  _execute.record_gradient("Empty", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1976 
1977 
def ensure_shape(input, shape, name=None):
  r"""Ensures that the tensor's shape matches the expected shape.

  Raises an error if the input tensor's shape does not match the specified shape.
  Returns the input tensor otherwise.

  Args:
    input: A `Tensor`. A tensor, whose shape is to be validated.
    shape: A `tf.TensorShape` or list of `ints`.
      The expected (possibly partially specified) shape of the input tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the shape attr, then build a symbolic op.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "EnsureShape", input=input, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("shape", _op.get_attr("shape"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "EnsureShape", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path; inputs first, then attr pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "EnsureShape",
        name, _ctx._post_execution_callbacks, input, "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return ensure_shape_eager_fallback(
          input, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2021 
2022 
def ensure_shape_eager_fallback(input, shape, name=None, ctx=None):
  r"""Slow-path eager executor for the EnsureShape op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  shape = _execute.make_shape(shape, "shape")
  type_attr, (input,) = _execute.args_to_matching_eager([input], exec_ctx)
  flat_inputs = [input]
  op_attrs = ("shape", shape, "T", type_attr)
  outputs = _execute.execute(b"EnsureShape", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("EnsureShape", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
2038 
2039 
def expand_dims(input, axis, name=None):
  r"""Inserts a dimension of 1 into a tensor's shape.

  Given a tensor `input`, this operation inserts a dimension of 1 at the
  dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
  zero; if you specify a negative number for `axis` it is counted backward from
  the end.

  This operation is useful if you want to add a batch dimension to a single
  element. For example, if you have a single image of shape `[height, width,
  channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
  which will make the shape `[1, height, width, channels]`.

  Other examples:

  ```
  # 't' is a tensor of shape [2]
  shape(expand_dims(t, 0)) ==> [1, 2]
  shape(expand_dims(t, 1)) ==> [2, 1]
  shape(expand_dims(t, -1)) ==> [2, 1]

  # 't2' is a tensor of shape [2, 3, 5]
  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
  ```

  This operation requires that:

  `-1-input.dims() <= dim <= input.dims()`

  This operation is related to `squeeze()`, which removes dimensions of
  size 1.

  Args:
    input: A `Tensor`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D (scalar). Specifies the dimension index at which to
      expand the shape of `input`. Must be in the range
      `[-rank(input) - 1, rank(input)]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op; note the op-def input is named "dim", while
    # the public Python parameter is "axis".
    _, _, _op = _op_def_lib._apply_op_helper(
        "ExpandDims", input=input, dim=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tdim", _op.get_attr("Tdim"))
    _execute.record_gradient(
      "ExpandDims", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ExpandDims",
        name, _ctx._post_execution_callbacks, input, axis)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return expand_dims_eager_fallback(
          input, axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2112 
2113 
def expand_dims_eager_fallback(input, axis, name=None, ctx=None):
  r"""Slow-path eager executor for the ExpandDims op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  type_attr, (input,) = _execute.args_to_matching_eager([input], exec_ctx)
  # The axis input defaults to int32 when its dtype is not yet determined.
  axis_type, (axis,) = _execute.args_to_matching_eager(
      [axis], exec_ctx, _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("T", type_attr, "Tdim", axis_type)
  outputs = _execute.execute(b"ExpandDims", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("ExpandDims", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
2129 
2130 
@tf_export('image.extract_image_patches', 'extract_image_patches')
@deprecated_endpoints('extract_image_patches')
def extract_image_patches(images, ksizes, strides, rates, padding, name=None):
  r"""Extract `patches` from `images` and put them in the "depth" output dimension.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    ksizes: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of `images`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. How far the centers of two consecutive patches are in
      the images. Must be: `[1, stride_rows, stride_cols, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
      input stride, specifying how far two consecutive patch samples are in the
      input. Equivalent to extracting patches with
      `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
      subsampling them spatially by a factor of `rates`. This is equivalent to
      `rate` in dilated (a.k.a. Atrous) convolutions.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.

      We specify the size-related attributes as:

      ```python
            ksizes = [1, ksize_rows, ksize_cols, 1]
            strides = [1, strides_rows, strides_cols, 1]
            rates = [1, rates_rows, rates_cols, 1]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
  """
  # Module-global eager context; it is None until TF first creates a context.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate/coerce the list-of-int attrs, then build the op.
    if not isinstance(ksizes, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksizes' argument to "
          "'extract_image_patches' Op, not %r." % ksizes)
    ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'extract_image_patches' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    if not isinstance(rates, (list, tuple)):
      raise TypeError(
          "Expected list for 'rates' argument to "
          "'extract_image_patches' Op, not %r." % rates)
    rates = [_execute.make_int(_i, "rates") for _i in rates]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ExtractImagePatches", images=images, ksizes=ksizes, strides=strides,
        rates=rates, padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("ksizes", _op.get_attr("ksizes"), "strides",
              _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T",
              _op.get_attr("T"), "padding", _op.get_attr("padding"))
    _execute.record_gradient(
      "ExtractImagePatches", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C-level fast path; inputs first, then attr pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ExtractImagePatches", name, _ctx._post_execution_callbacks, images,
        "ksizes", ksizes, "strides", strides, "rates", rates, "padding",
        padding)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these arguments; use the Python slow path.
      return extract_image_patches_eager_fallback(
          images, ksizes=ksizes, strides=strides, rates=rates,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2215 
2216 
def extract_image_patches_eager_fallback(images, ksizes, strides, rates, padding, name=None, ctx=None):
  r"""Slow-path eager executor for the ExtractImagePatches op.

  Invoked when the C-level fast path raises _FallbackException.
  """
  exec_ctx = ctx or _context.context()
  # Validate and coerce the three list-of-int attrs.
  if not isinstance(ksizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksizes' argument to "
        "'extract_image_patches' Op, not %r." % ksizes)
  ksizes = [_execute.make_int(k, "ksizes") for k in ksizes]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'extract_image_patches' Op, not %r." % strides)
  strides = [_execute.make_int(s, "strides") for s in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'extract_image_patches' Op, not %r." % rates)
  rates = [_execute.make_int(r, "rates") for r in rates]
  padding = _execute.make_str(padding, "padding")
  type_attr, (images,) = _execute.args_to_matching_eager([images], exec_ctx)
  flat_inputs = [images]
  op_attrs = ("ksizes", ksizes, "strides", strides, "rates", rates, "T",
              type_attr, "padding", padding)
  outputs = _execute.execute(b"ExtractImagePatches", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=exec_ctx, name=name)
  _execute.record_gradient("ExtractImagePatches", flat_inputs, op_attrs,
                           outputs, name)
  result, = outputs
  return result
2248 
2249 
@tf_export('extract_volume_patches')
def extract_volume_patches(input, ksizes, strides, padding, name=None):
  r"""Extract `patches` from `input` and put them in the "depth" output
dimension. 3D extension of `extract_image_patches`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
    ksizes: A list of `ints` that has length `>= 5`.
      The size of the sliding window for each dimension of `input`.
    strides: A list of `ints` that has length `>= 5`.
      1-D of length 5. How far the centers of two consecutive patches are in
      `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.

      We specify the size-related attributes as:

      ```python
            ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
            strides = [1, stride_planes, strides_rows, strides_cols, 1]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): validate the attrs in Python and add an
  # ExtractVolumePatches node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # ksizes and strides are op attrs, not tensors, so they must be static
    # Python lists (or tuples) of ints.
    if not isinstance(ksizes, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksizes' argument to "
          "'extract_volume_patches' Op, not %r." % ksizes)
    ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'extract_volume_patches' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ExtractVolumePatches", input=input, ksizes=ksizes, strides=strides,
        padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the attrs from the created op so the values recorded for the
    # gradient reflect what the op builder actually stored.
    _attrs = ("ksizes", _op.get_attr("ksizes"), "strides",
              _op.get_attr("strides"), "T", _op.get_attr("T"), "padding",
              _op.get_attr("padding"))
    _execute.record_gradient(
      "ExtractVolumePatches", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ExtractVolumePatches", name, _ctx._post_execution_callbacks, input,
        "ksizes", ksizes, "strides", strides, "padding", padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return extract_volume_patches_eager_fallback(
          input, ksizes=ksizes, strides=strides, padding=padding, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2320 
2321 
def extract_volume_patches_eager_fallback(input, ksizes, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function extract_volume_patches
  """
  _ctx = ctx if ctx else _context.context()
  # ksizes and strides are op attrs and must be static lists of ints.
  if not isinstance(ksizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksizes' argument to "
        "'extract_volume_patches' Op, not %r." % ksizes)
  ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'extract_volume_patches' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # Infer the T attr from the input tensor's dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("ksizes", ksizes, "strides", strides, "T", _attr_T, "padding",
  padding)
  # ExtractVolumePatches has exactly one output.
  _result = _execute.execute(b"ExtractVolumePatches", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ExtractVolumePatches", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2348 
2349 
@tf_export('quantization.fake_quant_with_min_max_args', 'fake_quant_with_min_max_args')
@deprecated_endpoints('fake_quant_with_min_max_args')
def fake_quant_with_min_max_args(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None):
  r"""Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.

  Attributes `[min; max]` define the clamping range for the `inputs` data.
  `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
  when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
  then de-quantized and output as floats in `[min; max]` interval.
  `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

  Quantization is called fake since the output is still in floating point.

  Args:
    inputs: A `Tensor` of type `float32`.
    min: An optional `float`. Defaults to `-6`.
    max: An optional `float`. Defaults to `6`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxArgs node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if min is None:
      min = -6
    min = _execute.make_float(min, "min")
    if max is None:
      max = 6
    max = _execute.make_float(max, "max")
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxArgs", inputs=inputs, min=min, max=max,
        num_bits=num_bits, narrow_range=narrow_range, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"),
              "num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxArgs", name, _ctx._post_execution_callbacks,
        inputs, "min", min, "max", max, "num_bits", num_bits, "narrow_range",
        narrow_range)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_args_eager_fallback(
          inputs, min=min, max=max, num_bits=num_bits,
          narrow_range=narrow_range, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2419 
2420 
def fake_quant_with_min_max_args_eager_fallback(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_args
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if min is None:
    min = -6
  min = _execute.make_float(min, "min")
  if max is None:
    max = 6
  max = _execute.make_float(max, "max")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # The op only accepts float32 inputs.
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  _inputs_flat = [inputs]
  _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range",
  narrow_range)
  # FakeQuantWithMinMaxArgs has exactly one output.
  _result = _execute.execute(b"FakeQuantWithMinMaxArgs", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2449 
2450 
@tf_export('quantization.fake_quant_with_min_max_args_gradient', 'fake_quant_with_min_max_args_gradient')
@deprecated_endpoints('fake_quant_with_min_max_args_gradient')
def fake_quant_with_min_max_args_gradient(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None):
  r"""Compute gradients for a FakeQuantWithMinMaxArgs operation.

  Args:
    gradients: A `Tensor` of type `float32`.
      Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
    inputs: A `Tensor` of type `float32`.
      Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
    min: An optional `float`. Defaults to `-6`.
    max: An optional `float`. Defaults to `6`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxArgsGradient node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if min is None:
      min = -6
    min = _execute.make_float(min, "min")
    if max is None:
      max = 6
    max = _execute.make_float(max, "max")
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxArgsGradient", gradients=gradients, inputs=inputs,
        min=min, max=max, num_bits=num_bits, narrow_range=narrow_range,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"),
              "num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxArgsGradient", name,
        _ctx._post_execution_callbacks, gradients, inputs, "min", min, "max",
        max, "num_bits", num_bits, "narrow_range", narrow_range)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_args_gradient_eager_fallback(
          gradients, inputs, min=min, max=max, num_bits=num_bits,
          narrow_range=narrow_range, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2516 
2517 
def fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_args_gradient
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if min is None:
    min = -6
  min = _execute.make_float(min, "min")
  if max is None:
    max = 6
  max = _execute.make_float(max, "max")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # Both tensor inputs must be float32.
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  _inputs_flat = [gradients, inputs]
  _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range",
  narrow_range)
  # FakeQuantWithMinMaxArgsGradient has exactly one output.
  _result = _execute.execute(b"FakeQuantWithMinMaxArgsGradient", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2547 
2548 
@tf_export('quantization.fake_quant_with_min_max_vars', 'fake_quant_with_min_max_vars')
@deprecated_endpoints('fake_quant_with_min_max_vars')
def fake_quant_with_min_max_vars(inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Fake-quantize the 'inputs' tensor of type float via global float scalars `min`

  and `max` to 'outputs' tensor of same shape as `inputs`.

  `[min; max]` define the clamping range for the `inputs` data.
  `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
  when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
  then de-quantized and output as floats in `[min; max]` interval.
  `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

  This operation has a gradient and thus allows for training `min` and `max`
  values.

  Args:
    inputs: A `Tensor` of type `float32`.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxVars node to the current graph. Unlike the
  # "Args" variant, min and max are tensor inputs here, not attrs.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVars", inputs=inputs, min=min, max=max,
        num_bits=num_bits, narrow_range=narrow_range, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVars", name, _ctx._post_execution_callbacks,
        inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_vars_eager_fallback(
          inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2613 
2614 
def fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # All three tensor inputs must be float32.
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # FakeQuantWithMinMaxVars has exactly one output.
  _result = _execute.execute(b"FakeQuantWithMinMaxVars", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2638 
2639 
# Output names for the three results of FakeQuantWithMinMaxVarsGradient,
# used to build the namedtuple returned by its Python wrappers.
_fake_quant_with_min_max_vars_gradient_outputs = ["backprops_wrt_input",
                                                 "backprop_wrt_min",
                                                 "backprop_wrt_max"]
_FakeQuantWithMinMaxVarsGradientOutput = _collections.namedtuple(
    "FakeQuantWithMinMaxVarsGradient",
    _fake_quant_with_min_max_vars_gradient_outputs)
2646 
2647 
@tf_export('quantization.fake_quant_with_min_max_vars_gradient', 'fake_quant_with_min_max_vars_gradient')
@deprecated_endpoints('fake_quant_with_min_max_vars_gradient')
def fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Compute gradients for a FakeQuantWithMinMaxVars operation.

  Args:
    gradients: A `Tensor` of type `float32`.
      Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
    inputs: A `Tensor` of type `float32`.
      Values passed as inputs to the FakeQuantWithMinMaxVars operation.
      min, max: Quantization interval, scalar floats.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization; between 2 and 8, inclusive.
    narrow_range: An optional `bool`. Defaults to `False`.
      Whether to quantize into 2^num_bits - 1 distinct values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).

    backprops_wrt_input: A `Tensor` of type `float32`.
    backprop_wrt_min: A `Tensor` of type `float32`.
    backprop_wrt_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxVarsGradient node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsGradient", gradients=gradients, inputs=inputs,
        min=min, max=max, num_bits=num_bits, narrow_range=narrow_range,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result, name)
    # Multi-output op: wrap the three results in a namedtuple.
    _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsGradient", name,
        _ctx._post_execution_callbacks, gradients, inputs, min, max,
        "num_bits", num_bits, "narrow_range", narrow_range)
      _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_vars_gradient_eager_fallback(
          gradients, inputs, min, max, num_bits=num_bits,
          narrow_range=narrow_range, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2714 
2715 
def fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_gradient
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # All four tensor inputs must be float32.
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [gradients, inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # FakeQuantWithMinMaxVarsGradient has three outputs.
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsGradient", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result, name)
  # Multi-output op: wrap the three results in a namedtuple.
  _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
  return _result
2740 
2741 
@tf_export('quantization.fake_quant_with_min_max_vars_per_channel', 'fake_quant_with_min_max_vars_per_channel')
@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel')
def fake_quant_with_min_max_vars_per_channel(inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,

  `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
  to 'outputs' tensor of same shape as `inputs`.

  `[min; max]` define the clamping range for the `inputs` data.
  `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
  when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
  then de-quantized and output as floats in `[min; max]` interval.
  `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

  This operation has a gradient and thus allows for training `min` and `max`
  values.

  Args:
    inputs: A `Tensor` of type `float32`.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxVarsPerChannel node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsPerChannel", inputs=inputs, min=min, max=max,
        num_bits=num_bits, narrow_range=narrow_range, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsPerChannel", name,
        _ctx._post_execution_callbacks, inputs, min, max, "num_bits",
        num_bits, "narrow_range", narrow_range)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_vars_per_channel_eager_fallback(
          inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2808 
2809 
def fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_per_channel
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # All three tensor inputs must be float32.
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # FakeQuantWithMinMaxVarsPerChannel has exactly one output.
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannel", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2833 
2834 
# Output names for the three results of FakeQuantWithMinMaxVarsPerChannelGradient,
# used to build the namedtuple returned by its Python wrappers.
_fake_quant_with_min_max_vars_per_channel_gradient_outputs = ["backprops_wrt_input",
                                                             "backprop_wrt_min",
                                                             "backprop_wrt_max"]
_FakeQuantWithMinMaxVarsPerChannelGradientOutput = _collections.namedtuple(
    "FakeQuantWithMinMaxVarsPerChannelGradient",
    _fake_quant_with_min_max_vars_per_channel_gradient_outputs)
2841 
2842 
@tf_export('quantization.fake_quant_with_min_max_vars_per_channel_gradient', 'fake_quant_with_min_max_vars_per_channel_gradient')
@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel_gradient')
def fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

  Args:
    gradients: A `Tensor` of type `float32`.
      Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
      shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.
    inputs: A `Tensor` of type `float32`.
      Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
        same as `gradients`.
      min, max: Quantization interval, floats of shape `[d]`.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization; between 2 and 16, inclusive.
    narrow_range: An optional `bool`. Defaults to `False`.
      Whether to quantize into 2^num_bits - 1 distinct values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).

    backprops_wrt_input: A `Tensor` of type `float32`.
    backprop_wrt_min: A `Tensor` of type `float32`.
    backprop_wrt_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context): apply attr defaults, coerce attr types,
  # and add a FakeQuantWithMinMaxVarsPerChannelGradient node to the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if narrow_range is None:
      narrow_range = False
    narrow_range = _execute.make_bool(narrow_range, "narrow_range")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsPerChannelGradient", gradients=gradients,
        inputs=inputs, min=min, max=max, num_bits=num_bits,
        narrow_range=narrow_range, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op for gradient recording.
    _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
              _op.get_attr("narrow_range"))
    _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result, name)
    # Multi-output op: wrap the three results in a namedtuple.
    _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
    return _result

  else:
    # Eager mode: attempt the C fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsPerChannelGradient", name,
        _ctx._post_execution_callbacks, gradients, inputs, min, max,
        "num_bits", num_bits, "narrow_range", narrow_range)
      _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(
          gradients, inputs, min, max, num_bits=num_bits,
          narrow_range=narrow_range, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert op status errors to Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2911 
2912 
def fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_per_channel_gradient
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults and coerce to the declared attr types.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  # All four tensor inputs must be float32.
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [gradients, inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # FakeQuantWithMinMaxVarsPerChannelGradient has three outputs.
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannelGradient", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result, name)
  # Multi-output op: wrap the three results in a namedtuple.
  _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
  return _result
2937 
2938 
@tf_export('fill')
def fill(dims, value, name=None):
  r"""Creates a tensor filled with a scalar value.

  This operation creates a tensor of shape `dims` and fills it with `value`.

  For example:

  ```
  # Output tensor has shape [2, 3].
  fill([2, 3], 9) ==> [[9, 9, 9]
                       [9, 9, 9]]
  ```

  `tf.fill` differs from `tf.constant` in a few ways:

  *   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
      Tensor values.
  *   `tf.fill` creates an Op in the computation graph that constructs the actual
      Tensor value at runtime. This is in contrast to `tf.constant` which embeds
      the entire Tensor into the graph with a `Const` node.
  *   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
      based on other runtime Tensors, unlike `tf.constant`.

  Args:
    dims: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D. Represents the shape of the output tensor.
    value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.

      @compatibility(numpy)
      Equivalent to np.full
      @end_compatibility
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "Fill" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Fill", dims=dims, value=value, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op so the recorded gradient
    # matches what the op builder actually resolved.
    _attrs = ("T", _op.get_attr("T"), "index_type",
              _op.get_attr("index_type"))
    _execute.record_gradient(
      "Fill", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op: unwrap the one-element list
    return _result

  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Fill", name,
        _ctx._post_execution_callbacks, dims, value)
      return _result
    except _core._FallbackException:
      # The fast path could not handle these inputs; retry via the Python
      # slow path.
      return fill_eager_fallback(
          dims, value, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, appending the op name
      # (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3004 
3005 
def fill_eager_fallback(dims, value, name=None, ctx=None):
  r"""Eager-mode slow path for `fill`.

  Infers the op attrs from the inputs, runs "Fill" through the generic
  execute path, and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Attr "T" comes from `value`; attr "index_type" comes from `dims` and
  # defaults to int32.
  attr_t, (value,) = _execute.args_to_matching_eager([value], eager_ctx)
  attr_index, (dims,) = _execute.args_to_matching_eager(
      [dims], eager_ctx, _dtypes.int32)
  flat_inputs = [dims, value]
  op_attrs = ("T", attr_t, "index_type", attr_index)
  outputs = _execute.execute(b"Fill", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Fill", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3021 
3022 
def gather(params, indices, validate_indices=True, name=None):
  r"""Gather slices from `params` according to `indices`.

  `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
  Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

  ```python
      # Scalar indices
      output[:, ..., :] = params[indices, :, ... :]

      # Vector indices
      output[i, :, ..., :] = params[indices[i], :, ... :]

      # Higher rank indices
      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
  ```

  If `indices` is a permutation and `len(indices) == params.shape[0]` then
  this operation will permute `params` accordingly.

  `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
  `indices` are always validated to be within range. If assigned to GPU,
  out-of-bound indices result in safe but unspecified behavior, which may include
  raising an error.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
  </div>

  Args:
    params: A `Tensor`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize the attr, then add a "Gather" node.
    if validate_indices is None:
      validate_indices = True
    validate_indices = _execute.make_bool(validate_indices, "validate_indices")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Gather", params=params, indices=indices,
        validate_indices=validate_indices, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op for gradient recording.
    _attrs = ("validate_indices", _op.get_attr("validate_indices"), "Tparams",
              _op.get_attr("Tparams"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "Gather", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Gather", name,
        _ctx._post_execution_callbacks, params, indices, "validate_indices",
        validate_indices)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return gather_eager_fallback(
          params, indices, validate_indices=validate_indices, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3095 
3096 
def gather_eager_fallback(params, indices, validate_indices=True, name=None, ctx=None):
  r"""Eager-mode slow path for `gather`."""
  eager_ctx = ctx if ctx else _context.context()
  # Canonicalize the attr, substituting the default when None is passed.
  validate_indices = _execute.make_bool(
      True if validate_indices is None else validate_indices,
      "validate_indices")
  # Infer the dtype attrs from the tensor inputs.
  attr_tparams, (params,) = _execute.args_to_matching_eager(
      [params], eager_ctx)
  attr_tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx)
  flat_inputs = [params, indices]
  op_attrs = ("validate_indices", validate_indices, "Tparams", attr_tparams,
              "Tindices", attr_tindices)
  outputs = _execute.execute(b"Gather", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Gather", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3116 
3117 
@tf_export('gather_nd', 'manip.gather_nd')
@deprecated_endpoints('manip.gather_nd')
def gather_nd(params, indices, name=None):
  r"""Gather slices from `params` into a Tensor with shape specified by `indices`.

  `indices` is an K-dimensional integer tensor, best thought of as a
  (K-1)-dimensional tensor of indices into `params`, where each element defines a
  slice of `params`:

      output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]

  Whereas in `tf.gather` `indices` defines slices into the first
  dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
  first `N` dimensions of `params`, where `N = indices.shape[-1]`.

  The last dimension of `indices` can be at most the rank of
  `params`:

      indices.shape[-1] <= params.rank

  The last dimension of `indices` corresponds to elements
  (if `indices.shape[-1] == params.rank`) or slices
  (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
  of `params`.  The output tensor has shape

      indices.shape[:-1] + params.shape[indices.shape[-1]:]

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, a 0 is stored in the
  corresponding output value.

  Some examples below.

  Simple indexing into a matrix:

  ```python
      indices = [[0, 0], [1, 1]]
      params = [['a', 'b'], ['c', 'd']]
      output = ['a', 'd']
  ```

  Slice indexing into a matrix:

  ```python
      indices = [[1], [0]]
      params = [['a', 'b'], ['c', 'd']]
      output = [['c', 'd'], ['a', 'b']]
  ```

  Indexing into a 3-tensor:

  ```python
      indices = [[1]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = [[['a1', 'b1'], ['c1', 'd1']]]


      indices = [[0, 1], [1, 0]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = [['c0', 'd0'], ['a1', 'b1']]


      indices = [[0, 0, 1], [1, 0, 1]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = ['b0', 'b1']
  ```

  Batched indexing into a matrix:

  ```python
      indices = [[[0, 0]], [[0, 1]]]
      params = [['a', 'b'], ['c', 'd']]
      output = [['a'], ['b']]
  ```

  Batched slice indexing into a matrix:

  ```python
      indices = [[[1]], [[0]]]
      params = [['a', 'b'], ['c', 'd']]
      output = [[['c', 'd']], [['a', 'b']]]
  ```

  Batched indexing into a 3-tensor:

  ```python
      indices = [[[1]], [[0]]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = [[[['a1', 'b1'], ['c1', 'd1']]],
                [[['a0', 'b0'], ['c0', 'd0']]]]

      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = [[['c0', 'd0'], ['a1', 'b1']],
                [['a0', 'b0'], ['c1', 'd1']]]


      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
      params = [[['a0', 'b0'], ['c0', 'd0']],
                [['a1', 'b1'], ['c1', 'd1']]]
      output = [['b0', 'b1'], ['d0', 'c1']]
  ```

  See also `tf.gather` and `tf.batch_gather`.

  Args:
    params: A `Tensor`. The tensor from which to gather values.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "GatherNd" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "GatherNd", params=params, indices=indices, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op for gradient recording.
    _attrs = ("Tparams", _op.get_attr("Tparams"), "Tindices",
              _op.get_attr("Tindices"))
    _execute.record_gradient(
      "GatherNd", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "GatherNd",
        name, _ctx._post_execution_callbacks, params, indices)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return gather_nd_eager_fallback(
          params, indices, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3265 
3266 
def gather_nd_eager_fallback(params, indices, name=None, ctx=None):
  r"""Eager-mode slow path for `gather_nd`."""
  eager_ctx = ctx if ctx else _context.context()
  # Infer the dtype attrs from the tensor inputs.
  attr_tparams, (params,) = _execute.args_to_matching_eager(
      [params], eager_ctx)
  attr_tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx)
  flat_inputs = [params, indices]
  op_attrs = ("Tparams", attr_tparams, "Tindices", attr_tindices)
  outputs = _execute.execute(b"GatherNd", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GatherNd", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3282 
3283 
def gather_v2(params, indices, axis, name=None):
  r"""Gather slices from `params` axis `axis` according to `indices`.

  `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
  Produces an output tensor with shape `params.shape[:axis] + indices.shape +
  params.shape[axis + 1:]` where:

  ```python
      # Scalar indices (output is rank(params) - 1).
      output[a_0, ..., a_n, b_0, ..., b_n] =
        params[a_0, ..., a_n, indices, b_0, ..., b_n]

      # Vector indices (output is rank(params)).
      output[a_0, ..., a_n, i, b_0, ..., b_n] =
        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]

      # Higher rank indices (output is rank(params) + rank(indices) - 1).
      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
  ```

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
  </div>

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, a 0 is stored in the
  corresponding output value.

  See also `tf.batch_gather` and `tf.gather_nd`.

  Args:
    params: A `Tensor`.
      The tensor from which to gather values. Must be at least rank
      `axis + 1`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor. Must be in range `[0, params.shape[axis])`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The axis in `params` to gather `indices` from. Defaults to the first
      dimension. Supports negative indexes.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "GatherV2" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "GatherV2", params=params, indices=indices, axis=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op for gradient recording.
    _attrs = ("Tparams", _op.get_attr("Tparams"), "Tindices",
              _op.get_attr("Tindices"), "Taxis", _op.get_attr("Taxis"))
    _execute.record_gradient(
      "GatherV2", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "GatherV2",
        name, _ctx._post_execution_callbacks, params, indices, axis)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return gather_v2_eager_fallback(
          params, indices, axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3357 
3358 
def gather_v2_eager_fallback(params, indices, axis, name=None, ctx=None):
  r"""Eager-mode slow path for `gather_v2`."""
  eager_ctx = ctx if ctx else _context.context()
  # Infer the three dtype attrs from the tensor inputs.
  attr_tparams, (params,) = _execute.args_to_matching_eager(
      [params], eager_ctx)
  attr_tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx)
  attr_taxis, (axis,) = _execute.args_to_matching_eager([axis], eager_ctx)
  flat_inputs = [params, indices, axis]
  op_attrs = ("Tparams", attr_tparams, "Tindices", attr_tindices,
              "Taxis", attr_taxis)
  outputs = _execute.execute(b"GatherV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GatherV2", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3376 
3377 
@tf_export('guarantee_const')
def guarantee_const(input, name=None):
  r"""Gives a guarantee to the TF runtime that the input tensor is a constant.

  The runtime is then free to make optimizations based on this.

  Only accepts value typed tensors as inputs and rejects resource variable handles
  as input.

  Returns the input tensor without modification.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "GuaranteeConst" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "GuaranteeConst", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr "T" is read back from the created op for gradient recording.
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "GuaranteeConst", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "GuaranteeConst", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return guarantee_const_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3423 
3424 
def guarantee_const_eager_fallback(input, name=None, ctx=None):
  r"""Eager-mode slow path for `guarantee_const`."""
  eager_ctx = ctx if ctx else _context.context()
  # Infer attr "T" from the single tensor input.
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"GuaranteeConst", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GuaranteeConst", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
3439 
3440 
def identity(input, name=None):
  r"""Return a tensor with the same shape and contents as the input tensor or value.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Identity" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Identity", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr "T" is read back from the created op for gradient recording.
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Identity", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Identity",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return identity_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3478 
3479 
def identity_eager_fallback(input, name=None, ctx=None):
  r"""Eager-mode slow path for `identity`."""
  eager_ctx = ctx if ctx else _context.context()
  # Infer attr "T" from the single tensor input.
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Identity", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Identity", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3494 
3495 
@tf_export('identity_n')
def identity_n(input, name=None):
  r"""Returns a list of tensors with the same shapes and contents as the input

  tensors.

  This op can be used to override the gradient for complicated functions. For
  example, suppose y = f(x) and we wish to apply a custom function g for backprop
  such that dx = g(dy). In Python,

  ```python
  with tf.get_default_graph().gradient_override_map(
      {'IdentityN': 'OverrideGradientWithG'}):
    y, _ = identity_n([f(x), x])

  @tf.RegisterGradient('OverrideGradientWithG')
  def ApplyG(op, dy, _):
    return [None, g(dy)]  # Do not backprop to f(x).
  ```

  Args:
    input: A list of `Tensor` objects.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "IdentityN" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IdentityN", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr "T" is read back from the created op for gradient recording.
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "IdentityN", _inputs_flat, _attrs, _result, name)
    # Multi-output op: return the full list of output tensors.
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IdentityN",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return identity_n_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3549 
3550 
def identity_n_eager_fallback(input, name=None, ctx=None):
  r"""Eager-mode slow path for `identity_n`."""
  eager_ctx = ctx if ctx else _context.context()
  # The op accepts a heterogeneous list of tensors; attr "T" holds their
  # dtypes, and the output count equals the input count.
  attr_t, input = _execute.convert_to_mixed_eager_tensors(input, eager_ctx)
  flat_inputs = list(input)
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"IdentityN", len(input), inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("IdentityN", flat_inputs, op_attrs, outputs, name)
  return outputs
3564 
3565 
def immutable_const(dtype, shape, memory_region_name, name=None):
  r"""Returns immutable tensor from memory region.

  The current implementation memmaps the tensor from a file.

  Args:
    dtype: A `tf.DType`. Type of the returned tensor.
    shape: A `tf.TensorShape` or list of `ints`. Shape of the returned tensor.
    memory_region_name: A `string`.
      Name of readonly memory region used by the tensor, see
      NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize the attr values, then add an
    # "ImmutableConst" node to the default graph.
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    memory_region_name = _execute.make_str(memory_region_name, "memory_region_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ImmutableConst", dtype=dtype, shape=shape,
        memory_region_name=memory_region_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op for gradient recording.
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"),
              "memory_region_name", _op.get_attr("memory_region_name"))
    _execute.record_gradient(
      "ImmutableConst", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path: attrs are passed inline as name/value pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ImmutableConst", name, _ctx._post_execution_callbacks, "dtype",
        dtype, "shape", shape, "memory_region_name", memory_region_name)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return immutable_const_eager_fallback(
          dtype=dtype, shape=shape, memory_region_name=memory_region_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3616 
3617 
def immutable_const_eager_fallback(dtype, shape, memory_region_name, name=None, ctx=None):
  r"""Eager-mode slow path for `immutable_const`."""
  eager_ctx = ctx if ctx else _context.context()
  # The op takes no tensor inputs; everything is carried in the attrs.
  op_attrs = ("dtype", _execute.make_type(dtype, "dtype"),
              "shape", _execute.make_shape(shape, "shape"),
              "memory_region_name",
              _execute.make_str(memory_region_name, "memory_region_name"))
  flat_inputs = []
  outputs = _execute.execute(b"ImmutableConst", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("ImmutableConst", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
3635 
3636 
def inplace_add(x, i, v, name=None):
  r"""    Adds v into specified rows of x.

    Computes y = x; y[i, :] += v; return y.

  Args:
    x: A `Tensor`. A `Tensor` of type T.
    i: A `Tensor` of type `int32`.
      A vector. Indices into the left-most dimension of `x`.
    v: A `Tensor`. Must have the same type as `x`.
      A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "InplaceAdd" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InplaceAdd", x=x, i=i, v=v, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr "T" is read back from the created op for gradient recording.
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InplaceAdd", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InplaceAdd",
        name, _ctx._post_execution_callbacks, x, i, v)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return inplace_add_eager_fallback(
          x, i, v, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3680 
3681 
def inplace_add_eager_fallback(x, i, v, name=None, ctx=None):
  r"""Eager-mode slow path for `inplace_add`."""
  eager_ctx = ctx if ctx else _context.context()
  # `x` and `v` must share a dtype (attr "T"); `i` is fixed to int32.
  attr_t, (x, v) = _execute.args_to_matching_eager([x, v], eager_ctx)
  i = _ops.convert_to_tensor(i, _dtypes.int32)
  flat_inputs = [x, i, v]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"InplaceAdd", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("InplaceAdd", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
3698 
3699 
def inplace_sub(x, i, v, name=None):
  r"""    Subtracts `v` into specified rows of `x`.

    Computes y = x; y[i, :] -= v; return y.

  Args:
    x: A `Tensor`. A `Tensor` of type T.
    i: A `Tensor` of type `int32`.
      A vector. Indices into the left-most dimension of `x`.
    v: A `Tensor`. Must have the same type as `x`.
      A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "InplaceSub" node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InplaceSub", x=x, i=i, v=v, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr "T" is read back from the created op for gradient recording.
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InplaceSub", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    try:
      # Eager fast path straight into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InplaceSub",
        name, _ctx._post_execution_callbacks, x, i, v)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return inplace_sub_eager_fallback(
          x, i, v, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception, adding
      # the op name for context when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3743 
3744 
def inplace_sub_eager_fallback(x, i, v, name=None, ctx=None):
  r"""Eager-mode slow path for inplace_sub.

  Dispatches the InplaceSub op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  dtype_attr, (x, v) = _execute.args_to_matching_eager([x, v], eager_ctx)
  i = _ops.convert_to_tensor(i, _dtypes.int32)
  flat_inputs = [x, i, v]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"InplaceSub", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "InplaceSub", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
3761 
3762 
def inplace_update(x, i, v, name=None):
  r"""    Updates specified rows with values in `v`.

    Computes `x[i, :] = v; return x`.

  Args:
    x: A `Tensor`. A tensor of type `T`.
    i: A `Tensor` of type `int32`.
      A vector. Indices into the left-most dimension of `x`.
    v: A `Tensor`. Must have the same type as `x`.
      A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build an InplaceUpdate graph op
    # through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InplaceUpdate", x=x, i=i, v=v, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InplaceUpdate", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "InplaceUpdate", name, _ctx._post_execution_callbacks, x, i, v)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return inplace_update_eager_fallback(
          x, i, v, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3806 
3807 
def inplace_update_eager_fallback(x, i, v, name=None, ctx=None):
  r"""Eager-mode slow path for inplace_update.

  Dispatches the InplaceUpdate op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  dtype_attr, (x, v) = _execute.args_to_matching_eager([x, v], eager_ctx)
  i = _ops.convert_to_tensor(i, _dtypes.int32)
  flat_inputs = [x, i, v]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"InplaceUpdate", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "InplaceUpdate", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
3824 
3825 
@tf_export('math.invert_permutation', 'invert_permutation')
@deprecated_endpoints('invert_permutation')
def invert_permutation(x, name=None):
  r"""Computes the inverse permutation of a tensor.

  This operation computes the inverse of an index permutation. It takes a 1-D
  integer tensor `x`, which represents the indices of a zero-based array, and
  swaps each value with its index position. In other words, for an output tensor
  `y` and an input tensor `x`, this operation computes the following:

  `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

  The values must include 0. There can be no duplicate values or negative values.

  For example:

  ```
  # tensor `x` is [3, 4, 0, 2, 1]
  invert_permutation(x) ==> [2, 4, 3, 0, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build an InvertPermutation
    # graph op through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InvertPermutation", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InvertPermutation", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "InvertPermutation", name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return invert_permutation_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3881 
3882 
def invert_permutation_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for invert_permutation.

  Dispatches the InvertPermutation op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  # Default the permutation dtype to int32 when x is not yet a tensor.
  dtype_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx,
                                                     _dtypes.int32)
  flat_inputs = [x]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"InvertPermutation", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "InvertPermutation", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
3897 
3898 
3899 _list_diff_outputs = ["out", "idx"]
3900 _ListDiffOutput = _collections.namedtuple(
3901     "ListDiff", _list_diff_outputs)
3902 
3903 
def list_diff(x, y, out_idx=_dtypes.int32, name=None):
  r"""Computes the difference between two lists of numbers or strings.

  Given a list `x` and a list `y`, this operation returns a list `out` that
  represents all values that are in `x` but not in `y`. The returned list `out`
  is sorted in the same order that the numbers appear in `x` (duplicates are
  preserved). This operation also returns a list `idx` that represents the
  position of each `out` element in `x`. In other words:

  `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

  For example, given this input:

  ```
  x = [1, 2, 3, 4, 5, 6]
  y = [1, 3, 5]
  ```

  This operation would return:

  ```
  out ==> [2, 4, 6]
  idx ==> [1, 3, 5]
  ```

  Args:
    x: A `Tensor`. 1-D. Values to keep.
    y: A `Tensor`. Must have the same type as `x`. 1-D. Values to remove.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, idx).

    out: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a ListDiff graph op
    # through the op-def library and record the gradient.
    if out_idx is None:
      out_idx = _dtypes.int32
    # Canonicalize the dtype attribute.
    out_idx = _execute.make_type(out_idx, "out_idx")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ListDiff", x=x, y=y, out_idx=out_idx, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx"))
    _execute.record_gradient(
      "ListDiff", _inputs_flat, _attrs, _result, name)
    # Wrap the two outputs in the ListDiff namedtuple.
    _result = _ListDiffOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ListDiff",
        name, _ctx._post_execution_callbacks, x, y, "out_idx", out_idx)
      _result = _ListDiffOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return list_diff_eager_fallback(
          x, y, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3972 
3973 
def list_diff_eager_fallback(x, y, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""Eager-mode slow path for list_diff.

  Dispatches the ListDiff op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  dtype_attr, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", dtype_attr, "out_idx", out_idx)
  outputs = _execute.execute(b"ListDiff", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "ListDiff", flat_inputs, op_attrs, outputs, name)
  # Two outputs (out, idx): wrap them in the named result tuple.
  return _ListDiffOutput._make(outputs)
3992 
3993 
def lower_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None):
  r"""Applies lower_bound(sorted_search_values, values) along each row.

  Each set of rows with the same index in (sorted_inputs, values) is treated
  independently.  The resulting row is the equivalent of calling
  `np.searchsorted(sorted_inputs, values, side='left')`.

  The result is not a global index to the entire 
  `Tensor`, but rather just the index in the last dimension.

  A 2-D example:
    sorted_sequence = [[0, 3, 9, 9, 10],
                       [1, 2, 3, 4, 5]]
    values = [[2, 4, 9],
              [0, 2, 6]]

    result = LowerBound(sorted_sequence, values)

    result == [[1, 2, 2],
               [0, 1, 5]]

  Args:
    sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered.
    values: A `Tensor`. Must have the same type as `sorted_inputs`.
      2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
      the values that will be searched for in `sorted_search_values`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a LowerBound graph op
    # through the op-def library and record the gradient.
    if out_type is None:
      out_type = _dtypes.int32
    # Canonicalize the dtype attribute.
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "LowerBound", sorted_inputs=sorted_inputs, values=values,
        out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type"))
    _execute.record_gradient(
      "LowerBound", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LowerBound",
        name, _ctx._post_execution_callbacks, sorted_inputs, values,
        "out_type", out_type)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return lower_bound_eager_fallback(
          sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4058 
4059 
def lower_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None):
  r"""Eager-mode slow path for lower_bound.

  Dispatches the LowerBound op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  dtype_attr, (sorted_inputs, values) = _execute.args_to_matching_eager(
      [sorted_inputs, values], eager_ctx)
  flat_inputs = [sorted_inputs, values]
  op_attrs = ("T", dtype_attr, "out_type", out_type)
  outputs = _execute.execute(b"LowerBound", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "LowerBound", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4078 
4079 
@tf_export('linalg.band_part', 'matrix_band_part')
@deprecated_endpoints('matrix_band_part')
def matrix_band_part(input, num_lower, num_upper, name=None):
  r"""Copy a tensor setting everything outside a central band in each innermost matrix

  to zero.

  The `band` part is computed as follows:
  Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
  tensor with the same shape where

  `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

  The indicator function

  `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&
                   (num_upper < 0 || (n-m) <= num_upper)`.

  For example:

  ```
  # if 'input' is [[ 0,  1,  2, 3]
                   [-1,  0,  1, 2]
                   [-2, -1,  0, 1]
                   [-3, -2, -1, 0]],

  tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                         [-1,  0,  1, 2]
                                         [ 0, -1,  0, 1]
                                         [ 0,  0, -1, 0]],

  tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                        [-1,  0,  1, 0]
                                        [-2, -1,  0, 1]
                                        [ 0, -2, -1, 0]]
  ```

  Useful special cases:

  ```
   tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
   tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
   tf.matrix_band_part(input, 0, 0) ==> Diagonal.
  ```

  Args:
    input: A `Tensor`. Rank `k` tensor.
    num_lower: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D tensor. Number of subdiagonals to keep. If negative, keep entire
      lower triangle.
    num_upper: A `Tensor`. Must have the same type as `num_lower`.
      0-D tensor. Number of superdiagonals to keep. If negative, keep
      entire upper triangle.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a MatrixBandPart graph op
    # through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixBandPart", input=input, num_lower=num_lower,
        num_upper=num_upper, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindex", _op.get_attr("Tindex"))
    _execute.record_gradient(
      "MatrixBandPart", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixBandPart", name, _ctx._post_execution_callbacks, input,
        num_lower, num_upper)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return matrix_band_part_eager_fallback(
          input, num_lower, num_upper, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4167 
4168 
def matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None):
  r"""Eager-mode slow path for matrix_band_part.

  Dispatches the MatrixBandPart op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  t_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  # Band limits default to int64 when not already tensors.
  tindex_attr, (num_lower, num_upper) = _execute.args_to_matching_eager(
      [num_lower, num_upper], eager_ctx, _dtypes.int64)
  flat_inputs = [input, num_lower, num_upper]
  op_attrs = ("T", t_attr, "Tindex", tindex_attr)
  outputs = _execute.execute(b"MatrixBandPart", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "MatrixBandPart", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4185 
4186 
@tf_export('linalg.diag', 'matrix_diag')
@deprecated_endpoints('matrix_diag')
def matrix_diag(diagonal, name=None):
  r"""Returns a batched diagonal tensor with a given batched diagonal values.

  Given a `diagonal`, this operation returns a tensor with the `diagonal` and
  everything else padded with zeros. The diagonal is computed as follows:

  Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
  tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:

  `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

  For example:

  ```
  # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]

  and diagonal.shape = (2, 4)

  tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
                                       [0, 2, 0, 0]
                                       [0, 0, 3, 0]
                                       [0, 0, 0, 4]],
                                      [[5, 0, 0, 0]
                                       [0, 6, 0, 0]
                                       [0, 0, 7, 0]
                                       [0, 0, 0, 8]]]

  which has shape (2, 4, 4)
  ```

  Args:
    diagonal: A `Tensor`. Rank `k`, where `k >= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a MatrixDiag graph op
    # through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixDiag", diagonal=diagonal, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "MatrixDiag", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MatrixDiag",
        name, _ctx._post_execution_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return matrix_diag_eager_fallback(
          diagonal, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4253 
4254 
def matrix_diag_eager_fallback(diagonal, name=None, ctx=None):
  r"""Eager-mode slow path for matrix_diag.

  Dispatches the MatrixDiag op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  dtype_attr, (diagonal,) = _execute.args_to_matching_eager([diagonal],
                                                            eager_ctx)
  flat_inputs = [diagonal]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixDiag", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "MatrixDiag", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4269 
4270 
@tf_export('linalg.diag_part', 'matrix_diag_part')
@deprecated_endpoints('matrix_diag_part')
def matrix_diag_part(input, name=None):
  r"""Returns the batched diagonal part of a batched tensor.

  This operation returns a tensor with the `diagonal` part
  of the batched `input`. The `diagonal` part is computed as follows:

  Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
  tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

  `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

  The input must be at least a matrix.

  For example:

  ```
  # 'input' is [[[1, 0, 0, 0]
                 [0, 2, 0, 0]
                 [0, 0, 3, 0]
                 [0, 0, 0, 4]],
                [[5, 0, 0, 0]
                 [0, 6, 0, 0]
                 [0, 0, 7, 0]
                 [0, 0, 0, 8]]]

  and input.shape = (2, 4, 4)

  tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

  which has shape (2, 4)
  ```

  Args:
    input: A `Tensor`. Rank `k` tensor where `k >= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a MatrixDiagPart graph op
    # through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixDiagPart", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "MatrixDiagPart", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixDiagPart", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return matrix_diag_part_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4339 
4340 
def matrix_diag_part_eager_fallback(input, name=None, ctx=None):
  r"""Eager-mode slow path for matrix_diag_part.

  Dispatches the MatrixDiagPart op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixDiagPart", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "MatrixDiagPart", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4355 
4356 
@tf_export('linalg.set_diag', 'matrix_set_diag')
@deprecated_endpoints('matrix_set_diag')
def matrix_set_diag(input, diagonal, name=None):
  r"""Returns a batched matrix tensor with new batched diagonal values.

  Given `input` and `diagonal`, this operation returns a tensor with the
  same shape and values as `input`, except for the main diagonal of the
  innermost matrices.  These will be overwritten by the values in `diagonal`.

  The output is computed as follows:

  Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
  `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
  tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

    * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
    * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

  Args:
    input: A `Tensor`. Rank `k+1`, where `k >= 1`.
    diagonal: A `Tensor`. Must have the same type as `input`.
      Rank `k`, where `k >= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a MatrixSetDiag graph op
    # through the op-def library and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixSetDiag", input=input, diagonal=diagonal, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "MatrixSetDiag", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixSetDiag", name, _ctx._post_execution_callbacks, input,
        diagonal)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return matrix_set_diag_eager_fallback(
          input, diagonal, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4412 
4413 
def matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):
  r"""Eager-mode slow path for matrix_set_diag.

  Dispatches the MatrixSetDiag op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  dtype_attr, (input, diagonal) = _execute.args_to_matching_eager(
      [input, diagonal], eager_ctx)
  flat_inputs = [input, diagonal]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixSetDiag", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "MatrixSetDiag", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4429 
4430 
def mirror_pad(input, paddings, mode, name=None):
  r"""Pads a tensor with mirrored values.

  This operation pads a `input` with mirrored values according to the `paddings`
  you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
  the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
  how many values to add before the contents of `input` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of `input`
  in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
  than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
  (if false, respectively).

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 2, 3], [4, 5, 6]].
  # 'paddings' is [[1, 1]], [2, 2]].
  # 'mode' is SYMMETRIC.
  # rank of 't' is 2.
  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                        [2, 1, 1, 2, 3, 3, 2]
                        [5, 4, 4, 5, 6, 6, 5]
                        [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    input: A `Tensor`. The input tensor to be padded.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
      Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
      do not include the borders, while in symmetric mode the padded regions
      do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
      is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
      it is `[1, 2, 3, 3, 2]` in symmetric mode.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Global context singleton; the guard below shows it may be None.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or uninitialized context): build a MirrorPad graph op
    # through the op-def library and record the gradient.
    # Canonicalize the string attribute.
    mode = _execute.make_str(mode, "mode")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MirrorPad", input=input, paddings=paddings, mode=mode, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"),
              "mode", _op.get_attr("mode"))
    _execute.record_gradient(
      "MirrorPad", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MirrorPad",
        name, _ctx._post_execution_callbacks, input, paddings, "mode", mode)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return mirror_pad_eager_fallback(
          input, paddings, mode=mode, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4505 
4506 
def mirror_pad_eager_fallback(input, paddings, mode, name=None, ctx=None):
  r"""Eager-mode slow path for mirror_pad.

  Dispatches the MirrorPad op through the generic eager executor.
  """
  eager_ctx = ctx or _context.context()
  mode = _execute.make_str(mode, "mode")
  t_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  # Paddings default to int32 when not already tensors.
  tpaddings_attr, (paddings,) = _execute.args_to_matching_eager(
      [paddings], eager_ctx, _dtypes.int32)
  flat_inputs = [input, paddings]
  op_attrs = ("T", t_attr, "Tpaddings", tpaddings_attr, "mode", mode)
  outputs = _execute.execute(b"MirrorPad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "MirrorPad", flat_inputs, op_attrs, outputs, name)
  # Single-output op: return the lone tensor.
  return outputs[0]
4523 
4524 
def mirror_pad_grad(input, paddings, mode, name=None):
  r"""Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.

  Folds the regions of `input` that were padded by `MirrorPad` back into the
  interior, according to `paddings`, which must be the same `paddings`
  argument given to the corresponding `MirrorPad` op.

  The folded size of each dimension D of the output is:

  `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
  # 'paddings' is [[0, 1]], [0, 1]].
  # 'mode' is SYMMETRIC.
  # rank of 't' is 2.
  pad(t, paddings) ==> [[ 1,  5]
                        [11, 28]]
  ```

  Args:
    input: A `Tensor`. The input tensor to be folded.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
      The mode used in the `MirrorPad` op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    mode = _execute.make_str(mode, "mode")
    _op = _op_def_lib._apply_op_helper(
        "MirrorPadGrad", input=input, paddings=paddings, mode=mode,
        name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("T", _op.get_attr("T"), "Tpaddings",
                _op.get_attr("Tpaddings"), "mode", _op.get_attr("mode"))
    _execute.record_gradient(
        "MirrorPadGrad", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "MirrorPadGrad",
        name, ctx._post_execution_callbacks, input, paddings, "mode", mode)
  except _core._FallbackException:
    return mirror_pad_grad_eager_fallback(
        input, paddings, mode=mode, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4589 
4590 
def mirror_pad_grad_eager_fallback(input, paddings, mode, name=None, ctx=None):
  r"""Slow-path eager executor for the MirrorPadGrad op.

  Invoked by `mirror_pad_grad` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  mode = _execute.make_str(mode, "mode")
  attr_t, (input,) = _execute.args_to_matching_eager([input], ctx)
  attr_tpaddings, (paddings,) = _execute.args_to_matching_eager(
      [paddings], ctx, _dtypes.int32)
  flat_inputs = [input, paddings]
  op_attrs = ("T", attr_t, "Tpaddings", attr_tpaddings, "mode", mode)
  results = _execute.execute(b"MirrorPadGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  _execute.record_gradient(
      "MirrorPadGrad", flat_inputs, op_attrs, results, name)
  output, = results
  return output
4607 
4608 
def one_hot(indices, depth, on_value, off_value, axis=-1, name=None):
  r"""Returns a one-hot tensor.

  The locations represented by indices in `indices` take value `on_value`,
  while all other locations take value `off_value`.

  If the input `indices` is rank `N`, the output will have rank `N+1`. The
  new axis is created at dimension `axis` (default: the new axis is appended
  at the end).

  If `indices` is a scalar the output shape will be a vector of length
  `depth`.

  If `indices` is a vector of length `features`, the output shape will be:
  ```
    features x depth if axis == -1
    depth x features if axis == 0
  ```

  If `indices` is a matrix (batch) with shape `[batch, features]`,
  the output shape will be:
  ```
    batch x features x depth if axis == -1
    batch x depth x features if axis == 1
    depth x batch x features if axis == 0
  ```

  Examples
  =========

  With `indices = [0, 2, -1, 1]`, `depth = 3`, `on_value = 5.0`,
  `off_value = 0.0`, and `axis = -1`, the output is `[4 x 3]`:

      ```output =
        [5.0 0.0 0.0]  // one_hot(0)
        [0.0 0.0 5.0]  // one_hot(2)
        [0.0 0.0 0.0]  // one_hot(-1)
        [0.0 5.0 0.0]  // one_hot(1)
      ```

  Note that an out-of-range index (such as `-1` above) produces a row of
  `off_value`s.  With `axis = 0` the one-hot axis becomes the leading
  dimension instead, and with a matrix of indices `[[0, 2], [1, -1]]`,
  `depth = 3`, `axis = -1` the output is `[2 x 2 x 3]`:

      ```output =
        [
          [1.0, 0.0, 0.0]  // one_hot(0)
          [0.0, 0.0, 1.0]  // one_hot(2)
        ][
          [0.0, 1.0, 0.0]  // one_hot(1)
          [0.0, 0.0, 0.0]  // one_hot(-1)
        ]```

  Args:
    indices: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`.
      A tensor of indices.
    depth: A `Tensor` of type `int32`.
      A scalar defining the depth of the one hot dimension.
    on_value: A `Tensor`.
      A scalar defining the value to fill in output when `indices[j] = i`.
    off_value: A `Tensor`. Must have the same type as `on_value`.
      A scalar defining the value to fill in output when `indices[j] != i`.
    axis: An optional `int`. Defaults to `-1`.
      The axis to fill (default: -1, a new inner-most axis).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `on_value`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    if axis is None:
      axis = -1
    axis = _execute.make_int(axis, "axis")
    _op = _op_def_lib._apply_op_helper(
        "OneHot", indices=indices, depth=depth, on_value=on_value,
        off_value=off_value, axis=axis, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("axis", _op.get_attr("axis"), "T", _op.get_attr("T"), "TI",
                _op.get_attr("TI"))
    _execute.record_gradient("OneHot", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "OneHot", name,
        ctx._post_execution_callbacks, indices, depth, on_value, off_value,
        "axis", axis)
  except _core._FallbackException:
    return one_hot_eager_fallback(
        indices, depth, on_value, off_value, axis=axis, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4750 
4751 
def one_hot_eager_fallback(indices, depth, on_value, off_value, axis=-1, name=None, ctx=None):
  r"""Slow-path eager executor for the OneHot op.

  Invoked by `one_hot` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  # on_value / off_value must share a dtype; indices default to int64.
  attr_t, (on_value, off_value) = _execute.args_to_matching_eager(
      [on_value, off_value], ctx)
  attr_ti, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, _dtypes.int64)
  depth = _ops.convert_to_tensor(depth, _dtypes.int32)
  flat_inputs = [indices, depth, on_value, off_value]
  op_attrs = ("axis", axis, "T", attr_t, "TI", attr_ti)
  results = _execute.execute(b"OneHot", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  _execute.record_gradient("OneHot", flat_inputs, op_attrs, results, name)
  output, = results
  return output
4772 
4773 
def ones_like(x, name=None):
  r"""Returns a tensor of ones with the same shape and type as x.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool`.
      a tensor of type T.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    _op = _op_def_lib._apply_op_helper("OnesLike", x=x, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient("OnesLike", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "OnesLike",
        name, ctx._post_execution_callbacks, x)
  except _core._FallbackException:
    return ones_like_eager_fallback(x, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4812 
4813 
def ones_like_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the OnesLike op.

  Invoked by `ones_like` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"OnesLike", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  _execute.record_gradient("OnesLike", flat_inputs, op_attrs, results, name)
  output, = results
  return output
4828 
4829 
def pack(values, axis=0, name=None):
  r"""Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.

  Packs the `N` tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the `axis` dimension.
  Given a list of tensors of shape `(A, B, C)`:

  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  ```
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
  ```

  This is the opposite of `unpack`.

  Args:
    values: A list of at least 1 `Tensor` objects with the same type.
      Must be of same shape and type.
    axis: An optional `int`. Defaults to `0`.
      Dimension along which to pack.  Negative values wrap around, so the
      valid range is `[-(R+1), R+1)`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    if not isinstance(values, (list, tuple)):
      raise TypeError(
          "Expected list for 'values' argument to "
          "'pack' Op, not %r." % values)
    _attr_N = len(values)
    if axis is None:
      axis = 0
    axis = _execute.make_int(axis, "axis")
    _op = _op_def_lib._apply_op_helper(
        "Pack", values=values, axis=axis, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "axis",
                _op.get_attr("axis"))
    _execute.record_gradient("Pack", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "Pack", name,
        ctx._post_execution_callbacks, values, "axis", axis)
  except _core._FallbackException:
    return pack_eager_fallback(values, axis=axis, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4900 
4901 
def pack_eager_fallback(values, axis=0, name=None, ctx=None):
  r"""Slow-path eager executor for the Pack op.

  Invoked by `pack` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'pack' Op, not %r." % values)
  attr_n = len(values)
  if axis is None:
    axis = 0
  axis = _execute.make_int(axis, "axis")
  attr_t, values = _execute.args_to_matching_eager(list(values), ctx)
  flat_inputs = list(values)
  op_attrs = ("N", attr_n, "T", attr_t, "axis", axis)
  results = _execute.execute(b"Pack", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("Pack", flat_inputs, op_attrs, results, name)
  output, = results
  return output
4924 
4925 
def pad(input, paddings, name=None):
  r"""Pads a tensor with zeros.

  This operation pads a `input` with zeros according to the `paddings` you
  specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
  the rank of `input`. For each dimension D of `input`, `paddings[D, 0]`
  indicates how many zeros to add before the contents of `input` in that
  dimension, and `paddings[D, 1]` indicates how many zeros to add after the
  contents of `input` in that dimension.

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 1], [2, 2]]
  # 'paddings' is [[1, 1], [2, 2]]
  # rank of 't' is 2
  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                        [0, 0, 1, 1, 0, 0]
                        [0, 0, 2, 2, 0, 0]
                        [0, 0, 0, 0, 0, 0]]
  ```

  Args:
    input: A `Tensor`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    _op = _op_def_lib._apply_op_helper(
        "Pad", input=input, paddings=paddings, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("T", _op.get_attr("T"), "Tpaddings",
                _op.get_attr("Tpaddings"))
    _execute.record_gradient("Pad", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "Pad", name,
        ctx._post_execution_callbacks, input, paddings)
  except _core._FallbackException:
    return pad_eager_fallback(input, paddings, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4987 
4988 
def pad_eager_fallback(input, paddings, name=None, ctx=None):
  r"""Slow-path eager executor for the Pad op.

  Invoked by `pad` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  attr_t, (input,) = _execute.args_to_matching_eager([input], ctx)
  attr_tpaddings, (paddings,) = _execute.args_to_matching_eager(
      [paddings], ctx, _dtypes.int32)
  flat_inputs = [input, paddings]
  op_attrs = ("T", attr_t, "Tpaddings", attr_tpaddings)
  results = _execute.execute(b"Pad", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("Pad", flat_inputs, op_attrs, results, name)
  output, = results
  return output
5004 
5005 
def pad_v2(input, paddings, constant_values, name=None):
  r"""Pads a tensor.

  This operation pads `input` according to the `paddings` and
  `constant_values` you specify. `paddings` is an integer tensor with shape
  `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`,
  `paddings[D, 0]` indicates how many padding values to add before the
  contents of `input` in that dimension, and `paddings[D, 1]` indicates how
  many padding values to add after the contents of `input` in that dimension.
  `constant_values` is a scalar tensor of the same type as `input` that
  indicates the value to use for padding `input`.

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 1], [2, 2]]
  # 'paddings' is [[1, 1], [2, 2]]
  # 'constant_values' is 0
  # rank of 't' is 2
  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                        [0, 0, 1, 1, 0, 0]
                        [0, 0, 2, 2, 0, 0]
                        [0, 0, 0, 0, 0, 0]]
  ```

  Args:
    input: A `Tensor`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    constant_values: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    _op = _op_def_lib._apply_op_helper(
        "PadV2", input=input, paddings=paddings,
        constant_values=constant_values, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("T", _op.get_attr("T"), "Tpaddings",
                _op.get_attr("Tpaddings"))
    _execute.record_gradient("PadV2", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "PadV2", name,
        ctx._post_execution_callbacks, input, paddings, constant_values)
  except _core._FallbackException:
    return pad_v2_eager_fallback(
        input, paddings, constant_values, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5071 
5072 
def pad_v2_eager_fallback(input, paddings, constant_values, name=None, ctx=None):
  r"""Slow-path eager executor for the PadV2 op.

  Invoked by `pad_v2` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  # input and constant_values must agree on dtype T.
  attr_t, (input, constant_values) = _execute.args_to_matching_eager(
      [input, constant_values], ctx)
  attr_tpaddings, (paddings,) = _execute.args_to_matching_eager(
      [paddings], ctx, _dtypes.int32)
  flat_inputs = [input, paddings, constant_values]
  op_attrs = ("T", attr_t, "Tpaddings", attr_tpaddings)
  results = _execute.execute(b"PadV2", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("PadV2", flat_inputs, op_attrs, results, name)
  output, = results
  return output
5089 
5090 
def parallel_concat(values, shape, name=None):
  r"""Concatenates a list of `N` tensors along the first dimension.

  The input tensors are all required to have size 1 in the first dimension.

  For example:

  ```
  # 'x' is [[1, 4]]
  # 'y' is [[2, 5]]
  # 'z' is [[3, 6]]
  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
  ```

  The difference between concat and parallel_concat is that concat requires
  all of the inputs be computed before the operation will begin but doesn't
  require that the input shapes be known during graph construction.  Parallel
  concat will copy pieces of the input into the output as they become
  available, in some situations this can provide a performance benefit.

  Args:
    values: A list of at least 1 `Tensor` objects with the same type.
      Tensors to be concatenated. All must have size 1 in the first dimension
      and same shape.
    shape: A `tf.TensorShape` or list of `ints`.
      the final shape of the result; should be equal to the shapes of any input
      but with the number of input values in the first dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    if not isinstance(values, (list, tuple)):
      raise TypeError(
          "Expected list for 'values' argument to "
          "'parallel_concat' Op, not %r." % values)
    _attr_N = len(values)
    shape = _execute.make_shape(shape, "shape")
    _op = _op_def_lib._apply_op_helper(
        "ParallelConcat", values=values, shape=shape, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape",
                _op.get_attr("shape"))
    _execute.record_gradient(
        "ParallelConcat", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name,
        "ParallelConcat", name, ctx._post_execution_callbacks, values,
        "shape", shape)
  except _core._FallbackException:
    return parallel_concat_eager_fallback(
        values, shape=shape, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5158 
5159 
def parallel_concat_eager_fallback(values, shape, name=None, ctx=None):
  r"""Slow-path eager executor for the ParallelConcat op.

  Invoked by `parallel_concat` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'parallel_concat' Op, not %r." % values)
  attr_n = len(values)
  shape = _execute.make_shape(shape, "shape")
  attr_t, values = _execute.args_to_matching_eager(list(values), ctx)
  flat_inputs = list(values)
  op_attrs = ("N", attr_n, "T", attr_t, "shape", shape)
  results = _execute.execute(b"ParallelConcat", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  _execute.record_gradient(
      "ParallelConcat", flat_inputs, op_attrs, results, name)
  output, = results
  return output
5180 
5181 
def placeholder(dtype, shape=None, name=None):
  r"""A placeholder op for a value that will be fed into the computation.

  N.B. This operation will fail with an error if it is executed. It is
  intended as a way to represent a value that will always be fed, and to
  provide attrs that enable the fed value to be checked at runtime.

  Args:
    dtype: A `tf.DType`. The type of elements in the tensor.
    shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      (Optional) The shape of the tensor. If the shape has 0 dimensions, the
      shape is unconstrained.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # Removed the generated no-op `if shape is None: shape = None`; a None
    # shape is passed through to make_shape unchanged either way.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Placeholder", dtype=dtype, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
    _execute.record_gradient(
      "Placeholder", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Fast path: execute directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Placeholder",
        name, _ctx._post_execution_callbacks, "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      return placeholder_eager_fallback(
          dtype=dtype, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5230 
5231 
def placeholder_eager_fallback(dtype, shape=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function placeholder

  Invoked by `placeholder` when the fast-path C execution raises
  `_FallbackException`.
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  # Removed the generated no-op `if shape is None: shape = None`; a None
  # shape is passed through to make_shape unchanged either way.
  shape = _execute.make_shape(shape, "shape")
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"Placeholder", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Placeholder", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
5249 
5250 
def placeholder_v2(dtype, shape, name=None):
  r"""A placeholder op for a value that will be fed into the computation.

  N.B. This operation will fail with an error if it is executed. It is
  intended as a way to represent a value that will always be fed, and to
  provide attrs that enable the fed value to be checked at runtime.

  Args:
    dtype: A `tf.DType`. The type of elements in the tensor.
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the tensor. The shape can be any partially-specified
      shape.  To be unconstrained, pass in a shape with unknown rank.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  ctx = _context._context
  building_graph = ctx is None or not ctx._eager_context.is_eager
  if building_graph:
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    _op = _op_def_lib._apply_op_helper(
        "PlaceholderV2", dtype=dtype, shape=shape, name=name)[2]
    outputs = list(_op.outputs)
    op_attrs = ("dtype", _op.get_attr("dtype"), "shape",
                _op.get_attr("shape"))
    _execute.record_gradient(
        "PlaceholderV2", _op.inputs, op_attrs, outputs, name)
    output, = outputs
    return output
  try:
    # Fast path: execute directly through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name,
        "PlaceholderV2", name, ctx._post_execution_callbacks, "dtype", dtype,
        "shape", shape)
  except _core._FallbackException:
    return placeholder_v2_eager_fallback(
        dtype=dtype, shape=shape, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5298 
5299 
def placeholder_v2_eager_fallback(dtype, shape, name=None, ctx=None):
  r"""Slow-path eager executor for the PlaceholderV2 op.

  Invoked by `placeholder_v2` when the fast-path C execution raises
  `_FallbackException`.
  """
  ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  flat_inputs = []
  op_attrs = ("dtype", dtype, "shape", shape)
  results = _execute.execute(b"PlaceholderV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  _execute.record_gradient(
      "PlaceholderV2", flat_inputs, op_attrs, results, name)
  output, = results
  return output
5315 
5316 
@tf_export('placeholder_with_default')
def placeholder_with_default(input, shape, name=None):
  r"""A placeholder op that passes through `input` when its output is not fed.

  Args:
    input: A `Tensor`. The default value to produce when `output` is not fed.
    shape: A `tf.TensorShape` or list of `ints`.
      The (possibly partial) shape of the tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a PlaceholderWithDefault node via the op-def library.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PlaceholderWithDefault", input=input, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
    _execute.record_gradient(
      "PlaceholderWithDefault", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PlaceholderWithDefault", name, _ctx._post_execution_callbacks, input,
        "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return placeholder_with_default_eager_fallback(
          input, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5359 
5360 
def placeholder_with_default_eager_fallback(input, shape, name=None, ctx=None):
  r"""Eager-mode slow path for placeholder_with_default.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  shape = _execute.make_shape(shape, "shape")
  # Resolve the dtype attr from the single tensor input.
  attr_dtype, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("dtype", attr_dtype, "shape", shape)
  outputs = _execute.execute(b"PlaceholderWithDefault", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("PlaceholderWithDefault", flat_inputs, op_attrs,
                           outputs, name)
  (result,) = outputs
  return result
5377 
5378 
def prevent_gradient(input, message="", name=None):
  r"""An identity op that triggers an error if a gradient is requested.

  When executed in a graph, this op outputs its input tensor as-is.

  When building ops to compute gradients, the TensorFlow gradient system
  will return an error when trying to lookup the gradient of this op,
  because no gradient must ever be registered for this function.  This
  op exists to prevent subtle bugs from silently returning unimplemented
  gradients in some corner cases.

  Args:
    input: A `Tensor`. any tensor.
    message: An optional `string`. Defaults to `""`.
      Will be printed in the error when anyone tries to differentiate
      this operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the attr default, then build the op node.
    if message is None:
      message = ""
    message = _execute.make_str(message, "message")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PreventGradient", input=input, message=message, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "message", _op.get_attr("message"))
    _execute.record_gradient(
      "PreventGradient", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PreventGradient", name, _ctx._post_execution_callbacks, input,
        "message", message)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return prevent_gradient_eager_fallback(
          input, message=message, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5431 
5432 
def prevent_gradient_eager_fallback(input, message="", name=None, ctx=None):
  r"""Eager-mode slow path for prevent_gradient.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  message = _execute.make_str("" if message is None else message, "message")
  # Resolve the T attr from the single tensor input.
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "message", message)
  outputs = _execute.execute(b"PreventGradient", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("PreventGradient", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
5450 
5451 
def quantize_and_dequantize(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None):
  r"""Use QuantizeAndDequantizeV2 instead.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    signed_input: An optional `bool`. Defaults to `True`.
    num_bits: An optional `int`. Defaults to `8`.
    range_given: An optional `bool`. Defaults to `False`.
    input_min: An optional `float`. Defaults to `0`.
    input_max: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize every attr default, then build the op node.
    if signed_input is None:
      signed_input = True
    signed_input = _execute.make_bool(signed_input, "signed_input")
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if range_given is None:
      range_given = False
    range_given = _execute.make_bool(range_given, "range_given")
    if input_min is None:
      input_min = 0
    input_min = _execute.make_float(input_min, "input_min")
    if input_max is None:
      input_max = 0
    input_max = _execute.make_float(input_max, "input_max")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantize", input=input, signed_input=signed_input,
        num_bits=num_bits, range_given=range_given, input_min=input_min,
        input_max=input_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("signed_input", _op.get_attr("signed_input"), "num_bits",
              _op.get_attr("num_bits"), "range_given",
              _op.get_attr("range_given"), "input_min",
              _op.get_attr("input_min"), "input_max",
              _op.get_attr("input_max"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "QuantizeAndDequantize", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantize", name, _ctx._post_execution_callbacks, input,
        "signed_input", signed_input, "num_bits", num_bits, "range_given",
        range_given, "input_min", input_min, "input_max", input_max)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantize_and_dequantize_eager_fallback(
          input, signed_input=signed_input, num_bits=num_bits,
          range_given=range_given, input_min=input_min, input_max=input_max,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5519 
5520 
def quantize_and_dequantize_eager_fallback(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None, ctx=None):
  r"""Eager-mode slow path for quantize_and_dequantize.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Coerce each attr to its canonical type, substituting defaults for None.
  signed_input = _execute.make_bool(
      True if signed_input is None else signed_input, "signed_input")
  num_bits = _execute.make_int(8 if num_bits is None else num_bits,
                               "num_bits")
  range_given = _execute.make_bool(
      False if range_given is None else range_given, "range_given")
  input_min = _execute.make_float(0 if input_min is None else input_min,
                                  "input_min")
  input_max = _execute.make_float(0 if input_max is None else input_max,
                                  "input_max")
  # Resolve the T attr from the single tensor input.
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("signed_input", signed_input, "num_bits", num_bits,
              "range_given", range_given, "input_min", input_min,
              "input_max", input_max, "T", attr_t)
  outputs = _execute.execute(b"QuantizeAndDequantize", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizeAndDequantize", flat_inputs, op_attrs,
                           outputs, name)
  (result,) = outputs
  return result
5551 
5552 
def quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, name=None):
  r"""Quantizes then dequantizes a tensor.

  This op simulates the precision loss from the quantized forward pass by:

  1. Quantizing the tensor to fixed point numbers, which should match the target
     quantization method when it is used in inference.
  2. Dequantizing it back to floating point numbers for the following ops, most
     likely matmul.

  There are different ways to quantize. This version uses only scaling, so 0.0
  maps to 0.

  From the specified 'num_bits' in the quantized output type, it determines
  minimum and maximum representable quantized values.

  e.g.

  *   [-128, 127] for signed, num_bits = 8, or
  *   [0, 255] for unsigned, num_bits = 8.

  If range_given == False, the initial input_min, input_max will be determined
  automatically as the minimum and maximum values in the input tensor, otherwise
  the specified values of input_min, input_max are used.

  Note: If the input_min, input_max are specified, they do not need to equal the
  actual minimum and maximum values in the tensor. e.g. in some cases it may be
  beneficial to specify these values such that the low probability extremes of the
  input distribution are clipped.

  This op determines the maximum scale_factor that would map the initial
  [input_min, input_max] range to a range that lies within the representable
  quantized range.

  It determines the scale from one of input_min and input_max, then updates the
  other one to maximize the respresentable range.

  e.g.

  *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
      5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it
      would update input_max to be 127 / 12.8 = 9.921875
  *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
      10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it
      would update input_min to be 128.0 / 12.7 = -10.07874
  *   if the output is unsigned, input_min is forced to be 0, and only the
      specified input_max is used.

  After determining the scale_factor and updating the input range, it applies the
  following to each value in the 'input' tensor.

  output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
      Tensor to quantize and then dequantize.
    input_min: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the minimum input value that needs to
      be represented, otherwise it is determined from the min value of the `input`
      tensor.
    input_max: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the maximum input value that needs to
      be represented, otherwise it is determined from the max value of the `input`
      tensor.
    signed_input: An optional `bool`. Defaults to `True`.
      Whether the quantization is signed or unsigned. (actually this parameter should
      have been called <b>`signed_output`</b>)
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization.
    range_given: An optional `bool`. Defaults to `False`.
      Whether the range is given or should be determined from the `input` tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize every attr default, then build the op node.
    if signed_input is None:
      signed_input = True
    signed_input = _execute.make_bool(signed_input, "signed_input")
    if num_bits is None:
      num_bits = 8
    num_bits = _execute.make_int(num_bits, "num_bits")
    if range_given is None:
      range_given = False
    range_given = _execute.make_bool(range_given, "range_given")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantizeV2", input=input, input_min=input_min,
        input_max=input_max, signed_input=signed_input, num_bits=num_bits,
        range_given=range_given, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("signed_input", _op.get_attr("signed_input"), "num_bits",
              _op.get_attr("num_bits"), "range_given",
              _op.get_attr("range_given"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantizeV2", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, "signed_input", signed_input, "num_bits",
        num_bits, "range_given", range_given)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantize_and_dequantize_v2_eager_fallback(
          input, input_min, input_max, signed_input=signed_input,
          num_bits=num_bits, range_given=range_given, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5672 
5673 
def quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, name=None, ctx=None):
  r"""Eager-mode slow path for quantize_and_dequantize_v2.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Coerce each attr to its canonical type, substituting defaults for None.
  signed_input = _execute.make_bool(
      True if signed_input is None else signed_input, "signed_input")
  num_bits = _execute.make_int(8 if num_bits is None else num_bits,
                               "num_bits")
  range_given = _execute.make_bool(
      False if range_given is None else range_given, "range_given")
  # All three tensor inputs must share one dtype, captured as attr T.
  attr_t, (input, input_min, input_max) = _execute.args_to_matching_eager(
      [input, input_min, input_max], eager_ctx)
  flat_inputs = [input, input_min, input_max]
  op_attrs = ("signed_input", signed_input, "num_bits", num_bits,
              "range_given", range_given, "T", attr_t)
  outputs = _execute.execute(b"QuantizeAndDequantizeV2", 1,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizeAndDequantizeV2", flat_inputs, op_attrs,
                           outputs, name)
  (result,) = outputs
  return result
5700 
5701 
def quantize_and_dequantize_v3(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None):
  r"""Quantizes then dequantizes a tensor.

  This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
  tensor, so its value can change during training.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_min: A `Tensor`. Must have the same type as `input`.
    input_max: A `Tensor`. Must have the same type as `input`.
    num_bits: A `Tensor` of type `int32`.
    signed_input: An optional `bool`. Defaults to `True`.
    range_given: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attr defaults, then build the op node.
    # Note: num_bits is a tensor input here, not an attr (unlike V2).
    if signed_input is None:
      signed_input = True
    signed_input = _execute.make_bool(signed_input, "signed_input")
    if range_given is None:
      range_given = True
    range_given = _execute.make_bool(range_given, "range_given")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantizeV3", input=input, input_min=input_min,
        input_max=input_max, num_bits=num_bits, signed_input=signed_input,
        range_given=range_given, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("signed_input", _op.get_attr("signed_input"), "range_given",
              _op.get_attr("range_given"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantizeV3", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, num_bits, "signed_input", signed_input,
        "range_given", range_given)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantize_and_dequantize_v3_eager_fallback(
          input, input_min, input_max, num_bits, signed_input=signed_input,
          range_given=range_given, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5759 
5760 
def quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None, ctx=None):
  r"""Eager-mode slow path for quantize_and_dequantize_v3.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Coerce each attr to its canonical type, substituting defaults for None.
  signed_input = _execute.make_bool(
      True if signed_input is None else signed_input, "signed_input")
  range_given = _execute.make_bool(
      True if range_given is None else range_given, "range_given")
  # The three float inputs must share one dtype, captured as attr T.
  attr_t, (input, input_min, input_max) = _execute.args_to_matching_eager(
      [input, input_min, input_max], eager_ctx)
  # In V3 num_bits is a runtime int32 tensor input rather than an attr.
  num_bits = _ops.convert_to_tensor(num_bits, _dtypes.int32)
  flat_inputs = [input, input_min, input_max, num_bits]
  op_attrs = ("signed_input", signed_input, "range_given", range_given, "T",
              attr_t)
  outputs = _execute.execute(b"QuantizeAndDequantizeV3", 1,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizeAndDequantizeV3", flat_inputs, op_attrs,
                           outputs, name)
  (result,) = outputs
  return result
5785 
5786 
# Structured return type for quantize_v2: (output, output_min, output_max).
_quantize_v2_outputs = ["output", "output_min", "output_max"]
_QuantizeV2Output = _collections.namedtuple(
    "QuantizeV2", _quantize_v2_outputs)
5790 
5791 
def quantize_v2(input, min_range, max_range, T, mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO", name=None):
  r"""Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.

  [min_range, max_range] are scalar floats that specify the range for
  the 'input' data. The 'mode' attribute controls exactly which calculations are
  used to convert the float values to their quantized equivalents.  The
  'round_mode' attribute controls which rounding tie-breaking algorithm is used
  when rounding float values to their quantized equivalents.

  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

  ```
  out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
  if T == qint8, out[i] -= (range(T) + 1) / 2.0
  ```

  here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

  *MIN_COMBINED Mode Example*

  Assume the input is type float and has a possible range of [0.0, 6.0] and the
  output type is quint8 ([0, 255]). The min_range and max_range values should be
  specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
  value of the input by 255/6 and cast to quint8.

  If the output type was qint8 ([-128, 127]), the operation will additionally
  subtract each value by 128 prior to casting, so that the range of values aligns
  with the range of qint8.

  If the mode is 'MIN_FIRST', then this approach is used:

  ```
  num_discrete_values = 1 << (# of bits in T)
  range_adjust = num_discrete_values / (num_discrete_values - 1)
  range = (range_max - range_min) * range_adjust
  range_scale = num_discrete_values / range
  quantized = round(input * range_scale) - round(range_min * range_scale) +
    numeric_limits<T>::min()
  quantized = max(quantized, numeric_limits<T>::min())
  quantized = min(quantized, numeric_limits<T>::max())
  ```

  The biggest difference between this and MIN_COMBINED is that the minimum range
  is rounded first, before it's subtracted from the rounded value. With
  MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
  and dequantizing will introduce a larger and larger error.

  *SCALED mode Example*

  `SCALED` mode matches the quantization approach used in
  `QuantizeAndDequantize{V2|V3}`.

  If the mode is `SCALED`, we do not use the full range of the output type,
  choosing to elide the lowest possible value for symmetry (e.g., output range is
  -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
  0.

  We first find the range of values in our tensor. The
  range we use is always centered on 0, so we find m such that

  ```c++
    m = max(abs(input_min), abs(input_max))
  ```

  Our input tensor range is then `[-m, m]`.

  Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
  If T is signed, this is

  ```
    num_bits = sizeof(T) * 8
    [min_fixed, max_fixed] =
        [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
  ```

  Otherwise, if T is unsigned, the fixed-point range is

  ```
    [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
  ```

  From this we compute our scaling factor, s:

  ```c++
    s = (max_fixed - min_fixed) / (2 * m)
  ```

  Now we can quantize the elements of our tensor:

  ```c++
  result = round(input * s)
  ```

  One thing to watch out for is that the operator may choose to adjust the
  requested minimum and maximum values slightly during the quantization process,
  so you should always use the output ports as the range for further calculations.
  For example, if the requested minimum and maximum values are close to equal,
  they will be separated by a small epsilon value to prevent ill-formed quantized
  buffers from being created. Otherwise, you can end up with buffers where all the
  quantized values map to the same float value, which causes problems for
  operations that have to perform further calculations on them.

  Args:
    input: A `Tensor` of type `float32`.
    min_range: A `Tensor` of type `float32`.
      The minimum scalar value possibly produced for the input.
    max_range: A `Tensor` of type `float32`.
      The maximum scalar value possibly produced for the input.
    T: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
    mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`.
    round_mode: An optional `string` from: `"HALF_AWAY_FROM_ZERO", "HALF_TO_EVEN"`. Defaults to `"HALF_AWAY_FROM_ZERO"`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).

    output: A `Tensor` of type `T`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attr defaults, then build the op node.
    T = _execute.make_type(T, "T")
    if mode is None:
      mode = "MIN_COMBINED"
    mode = _execute.make_str(mode, "mode")
    if round_mode is None:
      round_mode = "HALF_AWAY_FROM_ZERO"
    round_mode = _execute.make_str(round_mode, "round_mode")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeV2", input=input, min_range=min_range, max_range=max_range,
        T=T, mode=mode, round_mode=round_mode, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "mode", _op.get_attr("mode"),
              "round_mode", _op.get_attr("round_mode"))
    _execute.record_gradient(
      "QuantizeV2", _inputs_flat, _attrs, _result, name)
    # Wrap the three outputs in the (output, output_min, output_max) tuple.
    _result = _QuantizeV2Output._make(_result)
    return _result

  else:
    try:
      # Eager mode: attempt the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QuantizeV2",
        name, _ctx._post_execution_callbacks, input, min_range, max_range,
        "T", T, "mode", mode, "round_mode", round_mode)
      _result = _QuantizeV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantize_v2_eager_fallback(
          input, min_range, max_range, T=T, mode=mode, round_mode=round_mode,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise op failures as TF errors, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5951 
5952 
def quantize_v2_eager_fallback(input, min_range, max_range, T, mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO", name=None, ctx=None):
  r"""Eager-mode slow path for quantize_v2.

  Invoked when the C++ fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Coerce each attr to its canonical type, substituting defaults for None.
  T = _execute.make_type(T, "T")
  mode = _execute.make_str("MIN_COMBINED" if mode is None else mode, "mode")
  round_mode = _execute.make_str(
      "HALF_AWAY_FROM_ZERO" if round_mode is None else round_mode,
      "round_mode")
  # All three inputs are fixed float32 tensors for this op.
  input = _ops.convert_to_tensor(input, _dtypes.float32)
  min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)
  max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)
  flat_inputs = [input, min_range, max_range]
  op_attrs = ("T", T, "mode", mode, "round_mode", round_mode)
  outputs = _execute.execute(b"QuantizeV2", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizeV2", flat_inputs, op_attrs, outputs, name)
  return _QuantizeV2Output._make(outputs)
5976 
5977 
# Structured return type for quantized_concat:
# (output, output_min, output_max).
_quantized_concat_outputs = ["output", "output_min", "output_max"]
_QuantizedConcatOutput = _collections.namedtuple(
    "QuantizedConcat", _quantized_concat_outputs)
5981 
5982 
@tf_export('quantization.quantized_concat', 'quantized_concat')
@deprecated_endpoints('quantized_concat')
def quantized_concat(concat_dim, values, input_mins, input_maxes, name=None):
  r"""Concatenates quantized tensors along one dimension.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      0-D.  The dimension along which to concatenate.  Must be in the
      range [0, rank(values)).
    values: A list of at least 2 `Tensor` objects with the same type.
      The `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    input_mins: A list with the same length as `values` of `Tensor` objects with type `float32`.
      The minimum scalar values for each of the input tensors.
    input_maxes: A list with the same length as `values` of `Tensor` objects with type `float32`.
      The maximum scalar values for each of the input tensors.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).

    output: A `Tensor`. Has the same type as `values`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.

  Raises:
    TypeError: If `values`, `input_mins` or `input_maxes` is not a list or
      tuple.
    ValueError: If `input_mins` or `input_maxes` has a different length than
      `values`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): validate the list
  # arguments here, then add a node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(values, (list, tuple)):
      raise TypeError(
          "Expected list for 'values' argument to "
          "'quantized_concat' Op, not %r." % values)
    # The common list length becomes the op's 'N' attr.
    _attr_N = len(values)
    if not isinstance(input_mins, (list, tuple)):
      raise TypeError(
          "Expected list for 'input_mins' argument to "
          "'quantized_concat' Op, not %r." % input_mins)
    if len(input_mins) != _attr_N:
      raise ValueError(
          "List argument 'input_mins' to 'quantized_concat' Op with length %d "
          "must match length %d of argument 'values'." %
          (len(input_mins), _attr_N))
    if not isinstance(input_maxes, (list, tuple)):
      raise TypeError(
          "Expected list for 'input_maxes' argument to "
          "'quantized_concat' Op, not %r." % input_maxes)
    if len(input_maxes) != _attr_N:
      raise ValueError(
          "List argument 'input_maxes' to 'quantized_concat' Op with length %d "
          "must match length %d of argument 'values'." %
          (len(input_maxes), _attr_N))
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConcat", concat_dim=concat_dim, values=values,
        input_mins=input_mins, input_maxes=input_maxes, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "QuantizedConcat", _inputs_flat, _attrs, _result, name)
    # Wrap the three outputs as (output, output_min, output_max).
    _result = _QuantizedConcatOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedConcat", name, _ctx._post_execution_callbacks, concat_dim,
        values, input_mins, input_maxes)
      _result = _QuantizedConcatOutput._make(_result)
      return _result
    except _core._FallbackException:
      return quantized_concat_eager_fallback(
          concat_dim, values, input_mins, input_maxes, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6061 
6062 
def quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_concat
  """
  # Use the supplied eager context when given, otherwise the default one.
  _ctx = ctx if ctx else _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'quantized_concat' Op, not %r." % values)
  # The common list length becomes the op's 'N' attr.
  _attr_N = len(values)
  # input_mins and input_maxes get identical validation; the rendered error
  # messages match the graph-mode path exactly.
  for _list_name, _list_value in (("input_mins", input_mins),
                                  ("input_maxes", input_maxes)):
    if not isinstance(_list_value, (list, tuple)):
      raise TypeError(
          "Expected list for '%s' argument to "
          "'quantized_concat' Op, not %r." % (_list_name, _list_value))
    if len(_list_value) != _attr_N:
      raise ValueError(
          "List argument '%s' to 'quantized_concat' Op with length %d "
          "must match length %d of argument 'values'." %
          (_list_name, len(_list_value), _attr_N))
  # All values must share one dtype, which becomes the op's 'T' attr.
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  input_mins = _ops.convert_n_to_tensor(input_mins, _dtypes.float32)
  input_maxes = _ops.convert_n_to_tensor(input_maxes, _dtypes.float32)
  _inputs_flat = ([concat_dim] + list(values) + list(input_mins)
                  + list(input_maxes))
  _attrs = ("N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"QuantizedConcat", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("QuantizedConcat", _inputs_flat, _attrs, _result,
                           name)
  # Wrap the three outputs as (output, output_min, output_max).
  return _QuantizedConcatOutput._make(_result)
6103 
6104 
6105 _quantized_instance_norm_outputs = ["y", "y_min", "y_max"]
6106 _QuantizedInstanceNormOutput = _collections.namedtuple(
6107     "QuantizedInstanceNorm", _quantized_instance_norm_outputs)
6108 
6109 
def quantized_instance_norm(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None):
  r"""Quantized Instance normalization.

  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 4D input Tensor.
    x_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized input.
    x_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized input.
    output_range_given: An optional `bool`. Defaults to `False`.
      If True, `given_y_min` and `given_y_max`
      are used as the output range. Otherwise,
      the implementation computes the output range.
    given_y_min: An optional `float`. Defaults to `0`.
      Output in `y_min` if `output_range_given` is True.
    given_y_max: An optional `float`. Defaults to `0`.
      Output in `y_max` if `output_range_given` is True.
    variance_epsilon: An optional `float`. Defaults to `1e-05`.
      A small float number to avoid dividing by 0.
    min_separation: An optional `float`. Defaults to `0.001`.
      Minimum value of `y_max - y_min`
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, y_min, y_max).

    y: A `Tensor`. Has the same type as `x`.
    y_min: A `Tensor` of type `float32`.
    y_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: coerce the optional attrs (treating an explicit None as the
  # declared default), then add a node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if output_range_given is None:
      output_range_given = False
    output_range_given = _execute.make_bool(output_range_given, "output_range_given")
    if given_y_min is None:
      given_y_min = 0
    given_y_min = _execute.make_float(given_y_min, "given_y_min")
    if given_y_max is None:
      given_y_max = 0
    given_y_max = _execute.make_float(given_y_max, "given_y_max")
    if variance_epsilon is None:
      variance_epsilon = 1e-05
    variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
    if min_separation is None:
      min_separation = 0.001
    min_separation = _execute.make_float(min_separation, "min_separation")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedInstanceNorm", x=x, x_min=x_min, x_max=x_max,
        output_range_given=output_range_given, given_y_min=given_y_min,
        given_y_max=given_y_max, variance_epsilon=variance_epsilon,
        min_separation=min_separation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "output_range_given",
              _op.get_attr("output_range_given"), "given_y_min",
              _op.get_attr("given_y_min"), "given_y_max",
              _op.get_attr("given_y_max"), "variance_epsilon",
              _op.get_attr("variance_epsilon"), "min_separation",
              _op.get_attr("min_separation"))
    _execute.record_gradient(
      "QuantizedInstanceNorm", _inputs_flat, _attrs, _result, name)
    # Wrap the three outputs as (y, y_min, y_max).
    _result = _QuantizedInstanceNormOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedInstanceNorm", name, _ctx._post_execution_callbacks, x,
        x_min, x_max, "output_range_given", output_range_given, "given_y_min",
        given_y_min, "given_y_max", given_y_max, "variance_epsilon",
        variance_epsilon, "min_separation", min_separation)
      _result = _QuantizedInstanceNormOutput._make(_result)
      return _result
    except _core._FallbackException:
      return quantized_instance_norm_eager_fallback(
          x, x_min, x_max, output_range_given=output_range_given,
          given_y_min=given_y_min, given_y_max=given_y_max,
          variance_epsilon=variance_epsilon, min_separation=min_separation,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6198 
6199 
def quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_instance_norm
  """
  # Use the supplied eager context when given, otherwise the default one.
  _ctx = ctx if ctx else _context.context()
  # Fold the declared defaults in for any attr passed as None, then coerce
  # each attr to its expected Python type.
  output_range_given = _execute.make_bool(
      False if output_range_given is None else output_range_given,
      "output_range_given")
  given_y_min = _execute.make_float(
      0 if given_y_min is None else given_y_min, "given_y_min")
  given_y_max = _execute.make_float(
      0 if given_y_max is None else given_y_max, "given_y_max")
  variance_epsilon = _execute.make_float(
      1e-05 if variance_epsilon is None else variance_epsilon,
      "variance_epsilon")
  min_separation = _execute.make_float(
      0.001 if min_separation is None else min_separation, "min_separation")
  # The dtype of `x` becomes the op's 'T' attr.
  _attr_T, _xs = _execute.args_to_matching_eager([x], _ctx)
  (x,) = _xs
  x_min = _ops.convert_to_tensor(x_min, _dtypes.float32)
  x_max = _ops.convert_to_tensor(x_max, _dtypes.float32)
  _inputs_flat = [x, x_min, x_max]
  _attrs = ("T", _attr_T, "output_range_given", output_range_given,
            "given_y_min", given_y_min, "given_y_max", given_y_max,
            "variance_epsilon", variance_epsilon, "min_separation",
            min_separation)
  _result = _execute.execute(b"QuantizedInstanceNorm", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("QuantizedInstanceNorm", _inputs_flat, _attrs,
                           _result, name)
  # Wrap the three outputs as (y, y_min, y_max).
  return _QuantizedInstanceNormOutput._make(_result)
6233 
6234 
6235 _quantized_reshape_outputs = ["output", "output_min", "output_max"]
6236 _QuantizedReshapeOutput = _collections.namedtuple(
6237     "QuantizedReshape", _quantized_reshape_outputs)
6238 
6239 
def quantized_reshape(tensor, shape, input_min, input_max, name=None):
  r"""Reshapes a quantized tensor as per the Reshape op.

  Args:
    tensor: A `Tensor`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Defines the shape of the output tensor.
    input_min: A `Tensor` of type `float32`. The minimum value of the input.
    input_max: A `Tensor` of type `float32`. The maximum value of the input.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).

    output: A `Tensor`. Has the same type as `tensor`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: add a node to the current graph via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedReshape", tensor=tensor, shape=shape, input_min=input_min,
        input_max=input_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tshape", _op.get_attr("Tshape"))
    _execute.record_gradient(
      "QuantizedReshape", _inputs_flat, _attrs, _result, name)
    # Wrap the three outputs as (output, output_min, output_max).
    _result = _QuantizedReshapeOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedReshape", name, _ctx._post_execution_callbacks, tensor,
        shape, input_min, input_max)
      _result = _QuantizedReshapeOutput._make(_result)
      return _result
    except _core._FallbackException:
      return quantized_reshape_eager_fallback(
          tensor, shape, input_min, input_max, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6290 
6291 
def quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_reshape
  """
  # Use the supplied eager context when given, otherwise the default one.
  if ctx:
    _ctx = ctx
  else:
    _ctx = _context.context()
  # 'T' is inferred from the input tensor; 'Tshape' (default int32) from
  # the shape argument.
  _attr_T, _t = _execute.args_to_matching_eager([tensor], _ctx)
  (tensor,) = _t
  _attr_Tshape, _s = _execute.args_to_matching_eager([shape], _ctx,
                                                     _dtypes.int32)
  (shape,) = _s
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  _inputs_flat = [tensor, shape, input_min, input_max]
  _attrs = ("T", _attr_T, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"QuantizedReshape", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("QuantizedReshape", _inputs_flat, _attrs, _result,
                           name)
  # Wrap the three outputs as (output, output_min, output_max).
  return _QuantizedReshapeOutput._make(_result)
6309 
6310 
def rank(input, name=None):
  r"""Returns the rank of a tensor.

  This operation returns an integer representing the rank of `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  # shape of tensor 't' is [2, 2, 3]
  rank(t) ==> 3
  ```

  **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
  of a tensor is the number of indices required to uniquely select each element
  of the tensor. Rank is also known as "order", "degree", or "ndims."

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: add a node to the current graph via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Rank", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Rank", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Rank", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return rank_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6362 
6363 
def rank_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function rank
  """
  # Use the supplied eager context when given, otherwise the default one.
  if ctx:
    _ctx = ctx
  else:
    _ctx = _context.context()
  # The dtype of `input` becomes the op's 'T' attr.
  _attr_T, _ins = _execute.args_to_matching_eager([input], _ctx)
  (input,) = _ins
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Rank", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Rank", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  (_result,) = _result
  return _result
6378 
6379 
def ref_identity(input, name=None):
  r"""Return the same ref tensor as the input ref tensor.

  Args:
    input: A mutable `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `input`.

  Raises:
    RuntimeError: If called while eager execution is enabled; ref-typed
      outputs are only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode: add a node to the current graph via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RefIdentity", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "RefIdentity", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result

  else:
    # Ref-typed tensors have no eager representation, so this op cannot run
    # eagerly.  (A duplicated, unreachable copy of this raise that followed
    # the if/else was removed.)
    raise RuntimeError("ref_identity op does not support eager execution. Arg 'output' is a ref.")

6407 
@tf_export('reshape', 'manip.reshape')
@deprecated_endpoints('manip.reshape')
def reshape(tensor, shape, name=None):
  r"""Reshapes a tensor.

  Given `tensor`, this operation returns a tensor that has the same values
  as `tensor` with shape `shape`.

  If one component of `shape` is the special value -1, the size of that dimension
  is computed so that the total size remains constant.  In particular, a `shape`
  of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.

  If `shape` is 1-D or higher, then the operation returns a tensor with shape
  `shape` filled with the values of `tensor`. In this case, the number of elements
  implied by `shape` must be the same as the number of elements in `tensor`.

  For example:

  ```
  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
  # tensor 't' has shape [9]
  reshape(t, [3, 3]) ==> [[1, 2, 3],
                          [4, 5, 6],
                          [7, 8, 9]]

  # tensor 't' is [[[1, 1], [2, 2]],
  #                [[3, 3], [4, 4]]]
  # tensor 't' has shape [2, 2, 2]
  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                          [3, 3, 4, 4]]

  # tensor 't' is [[[1, 1, 1],
  #                 [2, 2, 2]],
  #                [[3, 3, 3],
  #                 [4, 4, 4]],
  #                [[5, 5, 5],
  #                 [6, 6, 6]]]
  # tensor 't' has shape [3, 2, 3]
  # pass '[-1]' to flatten 't'
  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

  # -1 can also be used to infer the shape

  # -1 is inferred to be 9:
  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
  # -1 is inferred to be 2:
  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
  # -1 is inferred to be 3:
  reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                                [2, 2, 2],
                                [3, 3, 3]],
                               [[4, 4, 4],
                                [5, 5, 5],
                                [6, 6, 6]]]

  # tensor 't' is [7]
  # shape `[]` reshapes to a scalar
  reshape(t, []) ==> 7
  ```

  Args:
    tensor: A `Tensor`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Defines the shape of the output tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  # Graph mode: add a node to the current graph via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Reshape", tensor=tensor, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tshape", _op.get_attr("Tshape"))
    _execute.record_gradient(
      "Reshape", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Reshape",
        name, _ctx._post_execution_callbacks, tensor, shape)
      return _result
    except _core._FallbackException:
      return reshape_eager_fallback(
          tensor, shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6506 
6507 
def reshape_eager_fallback(tensor, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reshape
  """
  # Use the supplied eager context when given, otherwise the default one.
  if ctx:
    _ctx = ctx
  else:
    _ctx = _context.context()
  # 'T' is inferred from the input tensor; 'Tshape' (default int32) from
  # the shape argument.
  _attr_T, _t = _execute.args_to_matching_eager([tensor], _ctx)
  (tensor,) = _t
  _attr_Tshape, _s = _execute.args_to_matching_eager([shape], _ctx,
                                                     _dtypes.int32)
  (shape,) = _s
  _inputs_flat = [tensor, shape]
  _attrs = ("T", _attr_T, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"Reshape", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Reshape", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  (_result,) = _result
  return _result
6523 
6524 
def resource_strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Assign `value` to the sliced l-value reference of `ref`.

  The values of `value` are assigned to the positions in the variable
  `ref` that are selected by the slice parameters. The slice parameters
  `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.

  NOTE this op currently does not support broadcasting and so `value`'s
  shape must be exactly the shape produced by the slice of `ref`.

  Args:
    ref: A `Tensor` of type `resource`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    end: A `Tensor`. Must have the same type as `begin`.
    strides: A `Tensor`. Must have the same type as `begin`.
    value: A `Tensor`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode: coerce the optional mask attrs to ints (treating an explicit
  # None as the declared default 0), then add a node to the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if begin_mask is None:
      begin_mask = 0
    begin_mask = _execute.make_int(begin_mask, "begin_mask")
    if end_mask is None:
      end_mask = 0
    end_mask = _execute.make_int(end_mask, "end_mask")
    if ellipsis_mask is None:
      ellipsis_mask = 0
    ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
    if new_axis_mask is None:
      new_axis_mask = 0
    new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
    if shrink_axis_mask is None:
      shrink_axis_mask = 0
    shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceStridedSliceAssign", ref=ref, begin=begin, end=end,
        strides=strides, value=value, begin_mask=begin_mask,
        end_mask=end_mask, ellipsis_mask=ellipsis_mask,
        new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
        name=name)
    # No tensor outputs; return the Operation itself.  (Unreachable
    # `_result = None; return _result` lines after this return were removed.)
    return _op

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceStridedSliceAssign", name, _ctx._post_execution_callbacks,
        ref, begin, end, strides, value, "begin_mask", begin_mask, "end_mask",
        end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask",
        new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
      return _result
    except _core._FallbackException:
      return resource_strided_slice_assign_eager_fallback(
          ref, begin, end, strides, value, begin_mask=begin_mask,
          end_mask=end_mask, ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6599 
6600 
def resource_strided_slice_assign_eager_fallback(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_strided_slice_assign
  """
  # Use the supplied eager context when given, otherwise the default one.
  _ctx = ctx if ctx else _context.context()
  # Fold the declared default (0) in for any mask passed as None, then
  # coerce each mask to an int attr.
  begin_mask = _execute.make_int(
      0 if begin_mask is None else begin_mask, "begin_mask")
  end_mask = _execute.make_int(0 if end_mask is None else end_mask,
                               "end_mask")
  ellipsis_mask = _execute.make_int(
      0 if ellipsis_mask is None else ellipsis_mask, "ellipsis_mask")
  new_axis_mask = _execute.make_int(
      0 if new_axis_mask is None else new_axis_mask, "new_axis_mask")
  shrink_axis_mask = _execute.make_int(
      0 if shrink_axis_mask is None else shrink_axis_mask,
      "shrink_axis_mask")
  # 'T' comes from `value`; 'Index' from the begin/end/strides triple,
  # which must share a single integer dtype.
  _attr_T, _vals = _execute.args_to_matching_eager([value], _ctx)
  (value,) = _vals
  _attr_Index, _idx = _execute.args_to_matching_eager(
      [begin, end, strides], _ctx)
  begin, end, strides = _idx
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  _inputs_flat = [ref, begin, end, strides, value]
  _attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
            "end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
            "new_axis_mask", new_axis_mask, "shrink_axis_mask",
            shrink_axis_mask)
  _execute.execute(b"ResourceStridedSliceAssign", 0, inputs=_inputs_flat,
                   attrs=_attrs, ctx=_ctx, name=name)
  # The op has no outputs.
  return None
6634 
6635 
def reverse(tensor, dims, name=None):
  r"""Reverses specific dimensions of a tensor.

  Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
  of `tensor`, this operation reverses each dimension i of `tensor` where
  `dims[i]` is `True`.

  `tensor` can have up to 8 dimensions. The number of dimensions
  of `tensor` must equal the number of elements in `dims`. In other words:

  `rank(tensor) = size(dims)`

  For example:

  ```
  # tensor 't' is [[[[ 0,  1,  2,  3],
  #                  [ 4,  5,  6,  7],
  #                  [ 8,  9, 10, 11]],
  #                 [[12, 13, 14, 15],
  #                  [16, 17, 18, 19],
  #                  [20, 21, 22, 23]]]]
  # tensor 't' shape is [1, 2, 3, 4]

  # 'dims' is [False, False, False, True]
  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                          [ 7,  6,  5,  4],
                          [ 11, 10, 9, 8]],
                         [[15, 14, 13, 12],
                          [19, 18, 17, 16],
                          [23, 22, 21, 20]]]]

  # 'dims' is [False, True, False, False]
  reverse(t, dims) ==> [[[[12, 13, 14, 15],
                          [16, 17, 18, 19],
                          [20, 21, 22, 23]
                         [[ 0,  1,  2,  3],
                          [ 4,  5,  6,  7],
                          [ 8,  9, 10, 11]]]]

  # 'dims' is [False, False, True, False]
  reverse(t, dims) ==> [[[[8, 9, 10, 11],
                          [4, 5, 6, 7],
                          [0, 1, 2, 3]]
                         [[20, 21, 22, 23],
                          [16, 17, 18, 19],
                          [12, 13, 14, 15]]]]
  ```

  Args:
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.
      Up to 8-D.
    dims: A `Tensor` of type `bool`. 1-D. The dimensions to reverse.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  # Graph mode: add a node to the current graph via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Reverse", tensor=tensor, dims=dims, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Reverse", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Reverse",
        name, _ctx._post_execution_callbacks, tensor, dims)
      return _result
    except _core._FallbackException:
      return reverse_eager_fallback(
          tensor, dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6720 
6721 
def reverse_eager_fallback(tensor, dims, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse

  Args:
    tensor: A `Tensor` of a type supported by the Reverse op. Up to 8-D.
    dims: A value convertible to a `bool` `Tensor`. 1-D. The dimensions to
      reverse.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from `tensor`'s dtype (converting it to an eager
  # tensor in the process).
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
  # `dims` has a fixed dtype in the op definition: bool.
  dims = _ops.convert_to_tensor(dims, _dtypes.bool)
  _inputs_flat = [tensor, dims]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Reverse", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Reverse", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unpack it from the result list.
  _result, = _result
  return _result
6737 
6738 
def reverse_sequence(input, seq_lengths, seq_dim, batch_dim=0, name=None):
  r"""Reverses variable length slices.

  This op first slices `input` along the dimension `batch_dim`, and for each
  slice `i`, reverses the first `seq_lengths[i]` elements along
  the dimension `seq_dim`.

  The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
  and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.

  The output slice `i` along dimension `batch_dim` is then given by input
  slice `i`, with the first `seq_lengths[i]` slices along dimension
  `seq_dim` reversed.

  For example:

  ```
  # Given this:
  batch_dim = 0
  seq_dim = 1
  input.dims = (4, 8, ...)
  seq_lengths = [7, 2, 3, 5]

  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

  # while entries past seq_lens are copied through:
  output[0, 7:, :, ...] = input[0, 7:, :, ...]
  output[1, 2:, :, ...] = input[1, 2:, :, ...]
  output[2, 3:, :, ...] = input[2, 3:, :, ...]
  output[3, 5:, :, ...] = input[3, 5:, :, ...]
  ```

  In contrast, if:

  ```
  # Given this:
  batch_dim = 2
  seq_dim = 0
  input.dims = (8, ?, 4, ...)
  seq_lengths = [7, 2, 3, 5]

  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

  # while entries past seq_lens are copied through:
  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
  output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
  ```

  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with length `input.dims(batch_dim)` and
      `max(seq_lengths) <= input.dims(seq_dim)`
    seq_dim: An `int`. The dimension which is partially reversed.
    batch_dim: An optional `int`. Defaults to `0`.
      The dimension along which reversal is performed.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate/normalize the int attrs before constructing the op.
    seq_dim = _execute.make_int(seq_dim, "seq_dim")
    if batch_dim is None:
      batch_dim = 0
    batch_dim = _execute.make_int(batch_dim, "batch_dim")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReverseSequence", input=input, seq_lengths=seq_lengths,
        seq_dim=seq_dim, batch_dim=batch_dim, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seq_dim", _op.get_attr("seq_dim"), "batch_dim",
              _op.get_attr("batch_dim"), "T", _op.get_attr("T"), "Tlen",
              _op.get_attr("Tlen"))
    _execute.record_gradient(
      "ReverseSequence", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ReverseSequence", name, _ctx._post_execution_callbacks, input,
        seq_lengths, "seq_dim", seq_dim, "batch_dim", batch_dim)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return reverse_sequence_eager_fallback(
          input, seq_lengths, seq_dim=seq_dim, batch_dim=batch_dim, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6846 
6847 
def reverse_sequence_eager_fallback(input, seq_lengths, seq_dim, batch_dim=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse_sequence

  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor` of type `int32` or `int64` (defaults to `int64`
      when conversion is needed).
    seq_dim: An `int`. The dimension which is partially reversed.
    batch_dim: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize the int attrs exactly as the graph path does.
  seq_dim = _execute.make_int(seq_dim, "seq_dim")
  if batch_dim is None:
    batch_dim = 0
  batch_dim = _execute.make_int(batch_dim, "batch_dim")
  # Infer "T" from `input`; infer "Tlen" from `seq_lengths`, defaulting to
  # int64 if it is not already a tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tlen, (seq_lengths,) = _execute.args_to_matching_eager([seq_lengths], _ctx, _dtypes.int64)
  _inputs_flat = [input, seq_lengths]
  _attrs = ("seq_dim", seq_dim, "batch_dim", batch_dim, "T", _attr_T, "Tlen",
  _attr_Tlen)
  _result = _execute.execute(b"ReverseSequence", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ReverseSequence", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
6868 
6869 
@tf_export('reverse', 'manip.reverse', 'reverse_v2')
@deprecated_endpoints('manip.reverse', 'reverse_v2')
def reverse_v2(tensor, axis, name=None):
  r"""Reverses specific dimensions of a tensor.

  NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
  `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.

  Given a `tensor`, and a `int32` tensor `axis` representing the set of
  dimensions of `tensor` to reverse. This operation reverses each dimension
  `i` for which there exists `j` s.t. `axis[j] == i`.

  `tensor` can have up to 8 dimensions. The number of dimensions specified
  in `axis` may be 0 or more entries. If an index is specified more than
  once, a InvalidArgument error is raised.

  For example:

  ```
  # tensor 't' is [[[[ 0,  1,  2,  3],
  #                  [ 4,  5,  6,  7],
  #                  [ 8,  9, 10, 11]],
  #                 [[12, 13, 14, 15],
  #                  [16, 17, 18, 19],
  #                  [20, 21, 22, 23]]]]
  # tensor 't' shape is [1, 2, 3, 4]

  # 'dims' is [3] or 'dims' is [-1]
  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                          [ 7,  6,  5,  4],
                          [11, 10,  9,  8]],
                         [[15, 14, 13, 12],
                          [19, 18, 17, 16],
                          [23, 22, 21, 20]]]]

  # 'dims' is '[1]' (or 'dims' is '[-3]')
  reverse(t, dims) ==> [[[[12, 13, 14, 15],
                          [16, 17, 18, 19],
                          [20, 21, 22, 23]],
                         [[ 0,  1,  2,  3],
                          [ 4,  5,  6,  7],
                          [ 8,  9, 10, 11]]]]

  # 'dims' is '[2]' (or 'dims' is '[-2]')
  reverse(t, dims) ==> [[[[8, 9, 10, 11],
                          [4, 5, 6, 7],
                          [0, 1, 2, 3]],
                         [[20, 21, 22, 23],
                          [16, 17, 18, 19],
                          [12, 13, 14, 15]]]]
  ```

  Args:
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.
      Up to 8-D.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D. The indices of the dimensions to reverse. Must be in the range
      `[-rank(tensor), rank(tensor))`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReverseV2", tensor=tensor, axis=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tidx", _op.get_attr("Tidx"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "ReverseV2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ReverseV2",
        name, _ctx._post_execution_callbacks, tensor, axis)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return reverse_v2_eager_fallback(
          tensor, axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6960 
6961 
def reverse_v2_eager_fallback(tensor, axis, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse_v2

  Args:
    tensor: A `Tensor` of a type supported by the ReverseV2 op. Up to 8-D.
    axis: A `Tensor` of type `int32` or `int64` (defaults to `int32` when
      conversion is needed). 1-D indices of the dimensions to reverse.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "Tidx" from `axis` (defaulting to int32) and "T" from `tensor`.
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
  _inputs_flat = [tensor, axis]
  _attrs = ("Tidx", _attr_Tidx, "T", _attr_T)
  _result = _execute.execute(b"ReverseV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ReverseV2", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
6977 
6978 
@tf_export('scatter_nd', 'manip.scatter_nd')
@deprecated_endpoints('manip.scatter_nd')
def scatter_nd(indices, updates, shape, name=None):
  r"""Scatter `updates` into a new tensor according to `indices`.

  Creates a new tensor by applying sparse `updates` to individual values or
  slices within a tensor (initially zero for numeric, empty for string) of
  the given `shape` according to indices.  This operator is the inverse of the
  `tf.gather_nd` operator which extracts values or slices from a given tensor.

  If `indices` contains duplicates, then their updates are accumulated (summed).

  **WARNING**: The order in which updates are applied is nondeterministic, so the
  output will be nondeterministic if `indices` contains duplicates -- because
  of some numerical approximation issues, numbers summed in different order
  may yield different results.

  `indices` is an integer tensor containing indices into a new tensor of shape
  `shape`.  The last dimension of `indices` can be at most the rank of `shape`:

      indices.shape[-1] <= shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = shape.rank`) or slices
  (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
  `shape`.  `updates` is a tensor with shape

      indices.shape[:-1] + shape[indices.shape[-1]:]

  The simplest form of scatter is to insert individual elements in a tensor by
  index. For example, say we want to insert 4 scattered elements in a rank-1
  tensor with 8 elements.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
  </div>

  In Python, this scatter operation would look like this:

  ```python
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      shape = tf.constant([8])
      scatter = tf.scatter_nd(indices, updates, shape)
      with tf.Session() as sess:
        print(sess.run(scatter))
  ```

  The resulting tensor would look like this:

      [0, 11, 0, 10, 9, 0, 0, 12]

  We can also, insert entire slices of a higher rank tensor all at once. For
  example, if we wanted to insert two slices in the first dimension of a
  rank-3 tensor with two matrices of new values.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
  </div>

  In Python, this scatter operation would look like this:

  ```python
      indices = tf.constant([[0], [2]])
      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]],
                             [[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      shape = tf.constant([4, 4, 4])
      scatter = tf.scatter_nd(indices, updates, shape)
      with tf.Session() as sess:
        print(sess.run(scatter))
  ```

  The resulting tensor would look like this:

      [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
       [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
       [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
       [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, the index is ignored.

  Args:
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    updates: A `Tensor`. Updates to scatter into output.
    shape: A `Tensor`. Must have the same type as `indices`.
      1-D. The shape of the resulting tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `updates`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ScatterNd", indices=indices, updates=updates, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "ScatterNd", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ScatterNd",
        name, _ctx._post_execution_callbacks, indices, updates, shape)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return scatter_nd_eager_fallback(
          indices, updates, shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7101 
7102 
def scatter_nd_eager_fallback(indices, updates, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function scatter_nd

  Args:
    indices: A `Tensor` of type `int32` or `int64`. Index tensor.
    updates: A `Tensor`. Updates to scatter into output.
    shape: A `Tensor`. Must have the same type as `indices`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor`. Has the same type as `updates`.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from `updates`; `indices` and `shape` must share one dtype,
  # which becomes "Tindices".
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], _ctx)
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([indices, shape], _ctx)
  (indices, shape) = _inputs_Tindices
  _inputs_flat = [indices, updates, shape]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ScatterNd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ScatterNd", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
7119 
7120 
def scatter_nd_non_aliasing_add(input, indices, updates, name=None):
  r"""Applies sparse addition to `input` using individual values or slices

  from `updates` according to indices `indices`.  The updates are non-aliasing:
  `input` is only modified in-place if no other operations will use it.
  Otherwise, a copy of `input` is made.  This operation has a gradient with
  respect to both `input` and `updates`.

  `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `input`.
  It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or `(P-K)`-dimensional slices
  (if `K < P`) along the `K`th dimension of `input`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$

  For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
  elements. In Python, that addition would look like this:

      input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
      with tf.Session() as sess:
        print(sess.run(output))

  The resulting value `output` would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to slices.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
      A Tensor.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into `input`.
    updates: A `Tensor`. Must have the same type as `input`.
      A Tensor. Must have the same type as ref. A tensor of updated values
      to add to `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ScatterNdNonAliasingAdd", input=input, indices=indices,
        updates=updates, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ScatterNdNonAliasingAdd", name, _ctx._post_execution_callbacks,
        input, indices, updates)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return scatter_nd_non_aliasing_add_eager_fallback(
          input, indices, updates, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7201 
7202 
def scatter_nd_non_aliasing_add_eager_fallback(input, indices, updates, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function scatter_nd_non_aliasing_add

  Args:
    input: A `Tensor`.
    indices: A `Tensor` of type `int32` or `int64`. Indices into `input`.
    updates: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = ctx if ctx else _context.context()
  # `input` and `updates` must share one dtype, which becomes "T";
  # "Tindices" is inferred from `indices`.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, updates], _ctx)
  (input, updates) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  _inputs_flat = [input, indices, updates]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ScatterNdNonAliasingAdd", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
7220 
7221 
def shape(input, out_type=_dtypes.int32, name=None):
  r"""Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  shape(t) ==> [2, 2, 3]
  ```

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize the dtype attr before constructing the op.
    if out_type is None:
      out_type = _dtypes.int32
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Shape", input=input, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type"))
    _execute.record_gradient(
      "Shape", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Shape", name,
        _ctx._post_execution_callbacks, input, "out_type", out_type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return shape_eager_fallback(
          input, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7272 
7273 
def shape_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function shape

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = ctx if ctx else _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the "T" attr from `input`'s dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"Shape", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Shape", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
7291 
7292 
def shape_n(input, out_type=_dtypes.int32, name=None):
  r"""Returns shape of tensors.

  This operation returns N 1-D integer tensors representing shape of `input[i]s`.

  Args:
    input: A list of at least 1 `Tensor` objects with the same type.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with type `out_type`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(input, (list, tuple)):
      raise TypeError(
          "Expected list for 'input' argument to "
          "'shape_n' Op, not %r." % input)
    # The "N" attr is implied by the number of inputs.
    _attr_N = len(input)
    if out_type is None:
      out_type = _dtypes.int32
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ShapeN", input=input, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "ShapeN", _inputs_flat, _attrs, _result, name)
    # Multi-output op: return the full list (no unpacking).
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ShapeN", name,
        _ctx._post_execution_callbacks, input, "out_type", out_type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return shape_n_eager_fallback(
          input, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7341 
7342 
def shape_n_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function shape_n

  Args:
    input: A list of at least 1 `Tensor` objects with the same type.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A list with the same length as `input` of `Tensor` objects with type `out_type`.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(input, (list, tuple)):
    raise TypeError(
        "Expected list for 'input' argument to "
        "'shape_n' Op, not %r." % input)
  # The "N" attr is implied by the number of inputs.
  _attr_N = len(input)
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # All inputs must share one dtype, which becomes the "T" attr.
  _attr_T, input = _execute.args_to_matching_eager(list(input), _ctx)
  _inputs_flat = list(input)
  _attrs = ("N", _attr_N, "T", _attr_T, "out_type", out_type)
  # The op produces N outputs, one shape vector per input.
  _result = _execute.execute(b"ShapeN", _attr_N, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ShapeN", _inputs_flat, _attrs, _result, name)
  return _result
7364 
7365 
def size(input, out_type=_dtypes.int32, name=None):
  r"""Returns the size of a tensor.

  This operation returns an integer representing the number of elements in
  `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize the dtype attr before constructing the op.
    if out_type is None:
      out_type = _dtypes.int32
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Size", input=input, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type"))
    _execute.record_gradient(
      "Size", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Size", name,
        _ctx._post_execution_callbacks, input, "out_type", out_type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return size_eager_fallback(
          input, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7417 
7418 
def size_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function size

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).
    ctx: Eager context to execute in; defaults to the current context.

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = ctx if ctx else _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the "T" attr from `input`'s dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"Size", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Size", _inputs_flat, _attrs, _result, name)
  # Single-output op: unpack the one result.
  _result, = _result
  return _result
7436 
7437 
def _slice(input, begin, size, name=None):
  r"""Return a slice from 'input'.

  The output tensor is a tensor with dimensions described by 'size'
  whose values are extracted from 'input' starting at the offsets in
  'begin'.

  *Requirements*:
    0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)

  Args:
    input: A `Tensor`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      begin[i] specifies the offset into the 'i'th dimension of
      'input' to slice from.
    size: A `Tensor`. Must have the same type as `begin`.
      size[i] specifies the number of elements of the 'i'th dimension
      of 'input' to slice. If size[i] is -1, all remaining elements in dimension
      i are included in the slice (i.e. this is equivalent to setting
      size[i] = input.dim_size(i) - begin[i]).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager disabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Slice", input=input, begin=begin, size=size, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"))
    _execute.record_gradient(
      "Slice", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one result.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Slice", name,
        _ctx._post_execution_callbacks, input, begin, size)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the slower Python path.
      return _slice_eager_fallback(
          input, begin, size, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception, adding the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7490 
7491 
def _slice_eager_fallback(input, begin, size, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `_slice`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  # T comes from `input`; Index is shared by `begin` and `size`.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Index, (begin, size) = _execute.args_to_matching_eager(
      [begin, size], _ctx)
  _flat_inputs = [input, begin, size]
  _op_attrs = ("T", _attr_T, "Index", _attr_Index)
  _outputs = _execute.execute(b"Slice", 1, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Slice", _flat_inputs, _op_attrs, _outputs, name)
  # Slice has exactly one output.
  return _outputs[0]
7508 
7509 
def snapshot(input, name=None):
  r"""Returns a copy of the input tensor.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the C fast path, falling back to the Python path.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "Snapshot",
          name, _ctx._post_execution_callbacks, input)
    except _core._FallbackException:
      return snapshot_eager_fallback(input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  else:
    # Graph mode: build a Snapshot node and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Snapshot", input=input, name=name)
    _outputs = _op.outputs[:]
    _op_attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Snapshot", _op.inputs, _op_attrs, _outputs, name)
    # Snapshot has exactly one output.
    return _outputs[0]
7547 
7548 
def snapshot_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `snapshot`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _flat_inputs = [input]
  _op_attrs = ("T", _attr_T)
  _outputs = _execute.execute(b"Snapshot", 1, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Snapshot", _flat_inputs, _op_attrs, _outputs, name)
  # Snapshot has exactly one output.
  return _outputs[0]
7563 
7564 
def space_to_batch(input, paddings, block_size, name=None):
  r"""SpaceToBatch for 4-D tensors of type T.

  This is a legacy version of the more general SpaceToBatchND.

  Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
  More specifically, this op outputs a copy of the input tensor where values from
  the `height` and `width` dimensions are moved to the `batch` dimension. After
  the zero-padding, both `height` and `width` of the input must be divisible by the
  block size.

  Args:
    input: A `Tensor`. 4-D with shape `[batch, height, width, depth]`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
        the padding of the input with zeros across the spatial dimensions as follows:

            paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

        The effective spatial dimensions of the zero-padded input tensor will be:

            height_pad = pad_top + height + pad_bottom
            width_pad = pad_left + width + pad_right

      The attr `block_size` must be greater than one. It indicates the block size.

        * Non-overlapping blocks of size `block_size x block size` in the height and
          width dimensions are rearranged into the batch dimension at each location.
        * The batch of the output tensor is `batch * block_size * block_size`.
        * Both height_pad and width_pad must be divisible by block_size.

      The shape of the output will be:

          [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
           depth]

      Some examples:

      (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

      ```
      x = [[[[1], [2]], [[3], [4]]]]
      ```

      The output tensor has shape `[4, 1, 1, 1]` and value:

      ```
      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      ```

      (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

      ```
      x = [[[[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]]]]
      ```

      The output tensor has shape `[4, 1, 1, 3]` and value:

      ```
      [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
      ```

      (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]],
            [[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```

      The output tensor has shape `[4, 2, 2, 1]` and value:

      ```
      x = [[[[1], [3]], [[9], [11]]],
           [[[2], [4]], [[10], [12]]],
           [[[5], [7]], [[13], [15]]],
           [[[6], [8]], [[14], [16]]]]
      ```

      (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]]],
           [[[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```

      The output tensor has shape `[8, 1, 2, 1]` and value:

      ```
      x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
           [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
      ```

      Among others, this operation is useful for reducing atrous convolution into
      regular convolution.
    block_size: An `int` that is `>= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a SpaceToBatch node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate/coerce the Python int attr before op construction.
    block_size = _execute.make_int(block_size, "block_size")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SpaceToBatch", input=input, paddings=paddings, block_size=block_size,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"),
              "block_size", _op.get_attr("block_size"))
    _execute.record_gradient(
      "SpaceToBatch", _inputs_flat, _attrs, _result, name)
    # SpaceToBatch has exactly one output; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SpaceToBatch",
        name, _ctx._post_execution_callbacks, input, paddings, "block_size",
        block_size)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return space_to_batch_eager_fallback(
          input, paddings, block_size=block_size, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7701 
7702 
def space_to_batch_eager_fallback(input, paddings, block_size, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `space_to_batch`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  block_size = _execute.make_int(block_size, "block_size")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # `paddings` defaults to int32 when its dtype cannot be inferred.
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager(
      [paddings], _ctx, _dtypes.int32)
  _flat_inputs = [input, paddings]
  _op_attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings,
               "block_size", block_size)
  _outputs = _execute.execute(b"SpaceToBatch", 1, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SpaceToBatch", _flat_inputs, _op_attrs, _outputs, name)
  # SpaceToBatch has exactly one output.
  return _outputs[0]
7720 
7721 
@tf_export('space_to_batch_nd', 'manip.space_to_batch_nd')
@deprecated_endpoints('manip.space_to_batch_nd')
def space_to_batch_nd(input, block_shape, paddings, name=None):
  r"""SpaceToBatch for N-D tensors of type T.

  This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
  grid of blocks of shape `block_shape`, and interleaves these blocks with the
  "batch" dimension (0) such that in the output, the spatial dimensions
  `[1, ..., M]` correspond to the position within the grid, and the batch
  dimension combines both the position within a spatial block and the original
  batch position.  Prior to division into blocks, the spatial dimensions of the
  input are optionally zero padded according to `paddings`.  See below for a
  precise description.

  Args:
    input: A `Tensor`.
      N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
      where spatial_shape has `M` dimensions.
    block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with shape `[M]`, all values must be >= 1.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D with shape `[M, 2]`, all values must be >= 0.
        `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
        `i + 1`, which corresponds to spatial dimension `i`.  It is required that
        `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

      This operation is equivalent to the following steps:

      1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
         input according to `paddings` to produce `padded` of shape `padded_shape`.

      2. Reshape `padded` to `reshaped_padded` of shape:

           [batch] +
           [padded_shape[1] / block_shape[0],
             block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1],
            block_shape[M-1]] +
           remaining_shape

      3. Permute dimensions of `reshaped_padded` to produce
         `permuted_reshaped_padded` of shape:

           block_shape +
           [batch] +
           [padded_shape[1] / block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1]] +
           remaining_shape

      4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
         dimension, producing an output tensor of shape:

           [batch * prod(block_shape)] +
           [padded_shape[1] / block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1]] +
           remaining_shape

      Some examples:

      (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
          `paddings = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1], [2]], [[3], [4]]]]
      ```

      The output tensor has shape `[4, 1, 1, 1]` and value:

      ```
      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      ```

      (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
          `paddings = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]]]]
      ```

      The output tensor has shape `[4, 1, 1, 3]` and value:

      ```
      [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
      ```

      (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
          `paddings = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]],
            [[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```

      The output tensor has shape `[4, 2, 2, 1]` and value:

      ```
      x = [[[[1], [3]], [[9], [11]]],
           [[[2], [4]], [[10], [12]]],
           [[[5], [7]], [[13], [15]]],
           [[[6], [8]], [[14], [16]]]]
      ```

      (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
          paddings = `[[0, 0], [2, 0]]`:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]]],
           [[[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```

      The output tensor has shape `[8, 1, 3, 1]` and value:

      ```
      x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
           [[[0], [2], [4]]], [[[0], [10], [12]]],
           [[[0], [5], [7]]], [[[0], [13], [15]]],
           [[[0], [6], [8]]], [[[0], [14], [16]]]]
      ```

      Among others, this operation is useful for reducing atrous convolution into
      regular convolution.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a SpaceToBatchND node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SpaceToBatchND", input=input, block_shape=block_shape,
        paddings=paddings, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tblock_shape",
              _op.get_attr("Tblock_shape"), "Tpaddings",
              _op.get_attr("Tpaddings"))
    _execute.record_gradient(
      "SpaceToBatchND", _inputs_flat, _attrs, _result, name)
    # SpaceToBatchND has exactly one output; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SpaceToBatchND", name, _ctx._post_execution_callbacks, input,
        block_shape, paddings)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return space_to_batch_nd_eager_fallback(
          input, block_shape, paddings, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7887 
7888 
def space_to_batch_nd_eager_fallback(input, block_shape, paddings, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `space_to_batch_nd`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # Both index-like inputs default to int32 when no dtype is inferable.
  _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager(
      [block_shape], _ctx, _dtypes.int32)
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager(
      [paddings], _ctx, _dtypes.int32)
  _flat_inputs = [input, block_shape, paddings]
  _op_attrs = ("T", _attr_T, "Tblock_shape", _attr_Tblock_shape,
               "Tpaddings", _attr_Tpaddings)
  _outputs = _execute.execute(b"SpaceToBatchND", 1, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SpaceToBatchND", _flat_inputs, _op_attrs, _outputs, name)
  # SpaceToBatchND has exactly one output.
  return _outputs[0]
7906 
7907 
def space_to_depth(input, block_size, data_format="NHWC", name=None):
  r"""SpaceToDepth for tensors of type T.

  Rearranges blocks of spatial data, into depth. More specifically,
  this op outputs a copy of the input tensor where values from the `height`
  and `width` dimensions are moved to the `depth` dimension.
  The attr `block_size` indicates the input block size.

    * Non-overlapping blocks of size `block_size x block size` are rearranged
      into depth at each location.
    * The depth of the output tensor is `block_size * block_size * input_depth`.
    * The Y, X coordinates within each block of the input become the high order
      component of the output channel index.
    * The input tensor's height and width must be divisible by block_size.

  The `data_format` attr specifies the layout of the input and output tensors
  with the following options:
    "NHWC": `[ batch, height, width, channels ]`
    "NCHW": `[ batch, channels, height, width ]`
    "NCHW_VECT_C":
        `qint8 [ batch, channels / 4, height, width, 4 ]`

  It is useful to consider the operation as transforming a 6-D Tensor.
  e.g. for data_format = NHWC,
       Each element in the input tensor can be specified via 6 coordinates,
       ordered by decreasing memory layout significance as:
       n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
                          within the output image, bX, bY means coordinates
                          within the input block, iC means input channels).
       The output would be a transpose to the following layout:
       n,oY,oX,bY,bX,iC

  This operation is useful for resizing the activations between convolutions
  (but keeping all data), e.g. instead of pooling. It is also useful for training
  purely convolutional models.

  For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
  block_size = 2:

  ```
  x = [[[[1], [2]],
        [[3], [4]]]]
  ```

  This operation will output a tensor of shape `[1, 1, 1, 4]`:

  ```
  [[[[1, 2, 3, 4]]]]
  ```

  Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
  the corresponding output will have a single element (i.e. width and height are
  both 1) and will have a depth of 4 channels (1 * block_size * block_size).
  The output element shape is `[1, 1, 4]`.

  For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

  ```
  x = [[[[1, 2, 3], [4, 5, 6]],
        [[7, 8, 9], [10, 11, 12]]]]
  ```

  This operation, for block_size of 2, will return the following tensor of shape
  `[1, 1, 1, 12]`

  ```
  [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
  ```

  Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

  ```
  x = [[[[1],   [2],  [5],  [6]],
        [[3],   [4],  [7],  [8]],
        [[9],  [10], [13],  [14]],
        [[11], [12], [15],  [16]]]]
  ```

  the operator will return the following tensor of shape `[1 2 2 4]`:

  ```
  x = [[[[1, 2, 3, 4],
         [5, 6, 7, 8]],
        [[9, 10, 11, 12],
         [13, 14, 15, 16]]]]
  ```

  Args:
    input: A `Tensor`.
    block_size: An `int` that is `>= 2`. The size of the spatial block.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a SpaceToDepth node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate/coerce Python-level attrs before op construction.
    block_size = _execute.make_int(block_size, "block_size")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SpaceToDepth", input=input, block_size=block_size,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "block_size",
              _op.get_attr("block_size"), "data_format",
              _op.get_attr("data_format"))
    _execute.record_gradient(
      "SpaceToDepth", _inputs_flat, _attrs, _result, name)
    # SpaceToDepth has exactly one output; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      # NOTE(review): a caller-passed data_format=None reaches the fast path
      # unnormalized here — presumably handled downstream; confirm.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SpaceToDepth",
        name, _ctx._post_execution_callbacks, input, "block_size", block_size,
        "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return space_to_depth_eager_fallback(
          input, block_size=block_size, data_format=data_format, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8040 
8041 
def space_to_depth_eager_fallback(input, block_size, data_format="NHWC", name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `space_to_depth`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  block_size = _execute.make_int(block_size, "block_size")
  # Fill in the default layout before validating the string attr.
  data_format = "NHWC" if data_format is None else data_format
  data_format = _execute.make_str(data_format, "data_format")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _flat_inputs = [input]
  _op_attrs = ("T", _attr_T, "block_size", block_size,
               "data_format", data_format)
  _outputs = _execute.execute(b"SpaceToDepth", 1, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SpaceToDepth", _flat_inputs, _op_attrs, _outputs, name)
  # SpaceToDepth has exactly one output.
  return _outputs[0]
8061 
8062 
def split(axis, value, num_split, name=None):
  r"""Splits a tensor into `num_split` tensors along one dimension.

  Args:
    axis: A `Tensor` of type `int32`.
      0-D.  The dimension along which to split.  Must be in the range
      `[-rank(value), rank(value))`.
    value: A `Tensor`. The tensor to split.
    num_split: An `int` that is `>= 1`.
      The number of ways to split.  Must evenly divide
      `value.shape[split_dim]`.
    name: A name for the operation (optional).

  Returns:
    A list of `num_split` `Tensor` objects with the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a Split node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    num_split = _execute.make_int(num_split, "num_split")
    # The op's input is named `split_dim`; the Python API calls it `axis`.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Split", split_dim=axis, value=value, num_split=num_split, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("num_split", _op.get_attr("num_split"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "Split", _inputs_flat, _attrs, _result, name)
    # Split produces `num_split` outputs, so the full list is returned.
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Split", name,
        _ctx._post_execution_callbacks, axis, value, "num_split", num_split)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return split_eager_fallback(
          axis, value, num_split=num_split, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8106 
8107 
def split_eager_fallback(axis, value, num_split, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `split`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  num_split = _execute.make_int(num_split, "num_split")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # The split dimension is always an int32 scalar tensor.
  axis = _ops.convert_to_tensor(axis, _dtypes.int32)
  _flat_inputs = [axis, value]
  _op_attrs = ("num_split", num_split, "T", _attr_T)
  # Split produces `num_split` outputs; return them all.
  _outputs = _execute.execute(b"Split", num_split, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Split", _flat_inputs, _op_attrs, _outputs, name)
  return _outputs
8123 
8124 
def split_v(value, size_splits, axis, num_split, name=None):
  r"""Splits a tensor into `num_split` tensors along one dimension.

  Args:
    value: A `Tensor`. The tensor to split.
    size_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      list containing the sizes of each output tensor along the split
      dimension. Must sum to the dimension of value along split_dim.
      Can contain one -1 indicating that dimension is to be inferred.
    axis: A `Tensor` of type `int32`.
      0-D.  The dimension along which to split.  Must be in the range
      `[-rank(value), rank(value))`.
    num_split: An `int` that is `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A list of `num_split` `Tensor` objects with the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a SplitV node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    num_split = _execute.make_int(num_split, "num_split")
    # The op's input is named `split_dim`; the Python API calls it `axis`.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SplitV", value=value, size_splits=size_splits, split_dim=axis,
        num_split=num_split, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("num_split", _op.get_attr("num_split"), "T", _op.get_attr("T"),
              "Tlen", _op.get_attr("Tlen"))
    _execute.record_gradient(
      "SplitV", _inputs_flat, _attrs, _result, name)
    # SplitV produces `num_split` outputs, so the full list is returned.
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SplitV", name,
        _ctx._post_execution_callbacks, value, size_splits, axis, "num_split",
        num_split)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return split_v_eager_fallback(
          value, size_splits, axis, num_split=num_split, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8173 
8174 
def split_v_eager_fallback(value, size_splits, axis, num_split, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `split_v`.

  Invoked when the C fast path raises `_FallbackException`.
  """
  _ctx = ctx or _context.context()
  num_split = _execute.make_int(num_split, "num_split")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # `size_splits` defaults to int64 when its dtype cannot be inferred.
  _attr_Tlen, (size_splits,) = _execute.args_to_matching_eager(
      [size_splits], _ctx, _dtypes.int64)
  # The split dimension is always an int32 scalar tensor.
  axis = _ops.convert_to_tensor(axis, _dtypes.int32)
  _flat_inputs = [value, size_splits, axis]
  _op_attrs = ("num_split", num_split, "T", _attr_T, "Tlen", _attr_Tlen)
  # SplitV produces `num_split` outputs; return them all.
  _outputs = _execute.execute(b"SplitV", num_split, inputs=_flat_inputs,
                              attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SplitV", _flat_inputs, _op_attrs, _outputs, name)
  return _outputs
8191 
8192 
def squeeze(input, axis=[], name=None):
  r"""Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, this operation returns a tensor of the same type with
  all dimensions of size 1 removed. If you don't want to remove all size 1
  dimensions, you can remove specific size 1 dimensions by specifying
  `axis`.

  For example:

  ```
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  shape(squeeze(t)) ==> [2, 3]
  ```

  Or, to remove specific size 1 dimensions:

  ```
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
  ```

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`.
      If specified, only squeezes the dimensions listed. The dimension
      index starts at 0. It is an error to squeeze a dimension that is not 1. Must
      be in the range `[-rank(input), rank(input))`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no context initialized yet): build a Squeeze node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # NOTE: the mutable [] default is shared across calls but is never
    # mutated (only rebound below), so the mutable-default pitfall is benign.
    if axis is None:
      axis = []
    if not isinstance(axis, (list, tuple)):
      raise TypeError(
          "Expected list for 'axis' argument to "
          "'squeeze' Op, not %r." % axis)
    axis = [_execute.make_int(_i, "axis") for _i in axis]
    # The op attr is named `squeeze_dims`; the Python API calls it `axis`.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Squeeze", input=input, squeeze_dims=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "squeeze_dims",
              _op.get_attr("squeeze_dims"))
    _execute.record_gradient(
      "Squeeze", _inputs_flat, _attrs, _result, name)
    # Squeeze has exactly one output; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      # NOTE(review): a caller-passed axis=None reaches the fast path
      # unnormalized here — presumably handled in C; confirm.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Squeeze",
        name, _ctx._post_execution_callbacks, input, "squeeze_dims", axis)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return squeeze_eager_fallback(
          input, axis=axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the matching TF error type, suppressing chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8261 
8262 
def squeeze_eager_fallback(input, axis=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function squeeze
  """
  # NOTE(review): default changed from the mutable literal `[]` to `None`;
  # the body already normalized `None` to `[]`, so behavior is unchanged.
  _ctx = ctx if ctx else _context.context()
  if axis is None:
    axis = []
  if not isinstance(axis, (list, tuple)):
    raise TypeError(
        "Expected list for 'axis' argument to "
        "'squeeze' Op, not %r." % axis)
  axis = [_execute.make_int(_i, "axis") for _i in axis]
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "squeeze_dims", axis)
  _result = _execute.execute(b"Squeeze", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Squeeze", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
8284 
8285 
@tf_export('stop_gradient')
def stop_gradient(input, name=None):
  r"""Stops gradient computation.

  When executed in a graph, this op outputs its input tensor as-is.

  When building ops to compute gradients, this op prevents the contribution
  of its inputs from being taken into account. Normally, the gradient
  generator adds ops to a graph to compute the derivatives of a specified
  'loss' by recursively finding out inputs that contributed to its
  computation. If you insert this op in the graph, its inputs are masked from
  the gradient generator and are not taken into account for computing
  gradients.

  This is useful any time you want to compute a value with TensorFlow but
  need to pretend that the value was a constant. Some examples include:

  *  The *EM* algorithm where the *M-step* should not involve backpropagation
     through the output of the *E-step*.
  *  Contrastive divergence training of Boltzmann machines where, when
     differentiating the energy function, the training must not backpropagate
     through the graph that generated the samples from the model.
  *  Adversarial training, where no backprop should happen through the
     adversarial example generation process.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  building_graph = _ctx is None or not _ctx._eager_context.is_eager
  if building_graph:
    # Graph mode: register a StopGradient op and record it for gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "StopGradient", input=input, name=name)
    outputs = _op.outputs[:]
    op_attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "StopGradient", _op.inputs, op_attrs, outputs, name)
    only_output, = outputs
    return only_output
  # Eager mode: attempt the C fast path, falling back to the Python slow
  # path when the fast path cannot handle the inputs.
  try:
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "StopGradient",
        name, _ctx._post_execution_callbacks, input)
  except _core._FallbackException:
    return stop_gradient_eager_fallback(input, name=name, ctx=_ctx)
  except _core._NotOkStatusException as e:
    msg = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, msg), None)
8344 
8345 
def stop_gradient_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `stop_gradient`."""
  _ctx = ctx if ctx else _context.context()
  # Resolve the dtype attr from the input, then execute the raw op.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  results = _execute.execute(b"StopGradient", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StopGradient", flat_inputs, op_attrs, results, name)
  only_result, = results
  return only_result
8360 
8361 
def strided_slice(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Return a strided slice from `input`.

  Note, most python users will want to use the Python `Tensor.__getitem__`
  or `Variable.__getitem__` rather than this op directly.

  The goal of this op is to produce a new tensor with a subset of
  the elements from the `n` dimensional `input` tensor. The subset is chosen using
  a sequence of `m` sparse range specifications encoded into the arguments
  of this function. Note, in some cases
  `m` could be equal to `n`, but this need not be the case. Each
  range specification entry can be one of the following:

  - An ellipsis (...). Ellipses are used to imply zero or more
    dimensions of full-dimension selection and are produced using
    `ellipsis_mask`. For example, `foo[...]` is the identity slice.

  - A new axis. This is used to insert a new shape=1 dimension and is
    produced using `new_axis_mask`. For example, `foo[:, ...]` where
    `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.


  - A range `begin:end:stride`. This is used to specify how much to choose from
    a given dimension. `stride` can be any integer but 0.  `begin` is an integer
    which represents the index of the first value to select while `end` represents
    the index of the last value to select. The number of values selected in each
    dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
    `begin` and `end` can be negative where `-1` is the last element, `-2` is
    the second to last. `begin_mask` controls whether to replace the explicitly
    given `begin` with an implicit effective value of `0` if `stride > 0` and
    `-1` if `stride < 0`. `end_mask` is analogous but produces the number
    required to create the largest open interval. For example, given a shape
    `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
    not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
    and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
    first dimension of a tensor while dropping the last two (in the original
    order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.

  - A single index. This is used to keep only elements that have a given
    index. For example `foo[2, :]` on a shape `(5,6)` tensor produces a
    shape `(6,)` tensor. This is encoded in `begin` and `end` and
    `shrink_axis_mask`.

  Each conceptual range specification is encoded in the op's argument. This
  encoding is best understood by considering a non-trivial example. In
  particular,
  `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as

  ```
  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
  end = [2, 4, x, x, -3, x]
  strides = [1, 1, x, x, -1, 1]
  begin_mask = 1<<4 | 1 << 5 = 48
  end_mask = 1<<5 = 32
  ellipsis_mask = 1<<3 = 8
  new_axis_mask = 1<<2 = 4
  shrink_axis_mask = 1<<0
  ```

  In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
  the slice becomes (2, 1, 5, 5, 2, 5).
  Let us walk step by step through each argument specification.

  1.  The first argument in the example slice is turned into `begin = 1` and
  `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
  also set the appropriate bit in `shrink_axis_mask`.

  2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
  zero bits contributed.

  3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
  dimension in the final shape. Dummy values are contributed to begin,
  end and stride, while the new_axis_mask bit is set.

  4. `...` grab the full ranges from as many dimensions as needed to
  fully specify a slice for every dimension of the input shape.

  5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
  with a dimension that has shape `s` is converted to a positive index
  `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
  is done internally so begin, end and strides receive x, -3, and -1.
  The appropriate begin_mask bit is set to indicate the start range is the
  full range (ignoring the x).

  6. `:` indicates that the entire contents of the corresponding dimension
  is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
  receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
  `end_mask` are also set.

  *Requirements*:
    `0 != strides[i] for i in [0, m)`
    `ellipsis_mask must be a power of two (only one ellipsis)`

  Args:
    input: A `Tensor`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      `begin[k]` specifies the offset into the `k`th range specification.
      The exact dimension this corresponds to will be determined by context.
      Out-of-bounds values will be silently clamped. If the `k`th bit of
      `begin_mask` is set then `begin[k]` is ignored and the full range of the
      appropriate dimension is used instead. Negative values causes indexing
      to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
    end: A `Tensor`. Must have the same type as `begin`.
      `end[i]` is like `begin` with the exception that `end_mask` is
      used to determine full ranges.
    strides: A `Tensor`. Must have the same type as `begin`.
      `strides[i]` specifies the increment in the `i`th specification
      after extracting a given element. Negative indices will reverse
      the original order. Out of range values are
      clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
    begin_mask: An optional `int`. Defaults to `0`.
      a bitmask where a bit i being 1 means to ignore the begin
      value and instead use the largest interval possible. At runtime
      begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
      `[-1, n-1]` if `stride[i] < 0`
    end_mask: An optional `int`. Defaults to `0`. analogous to `begin_mask`
    ellipsis_mask: An optional `int`. Defaults to `0`.
      a bitmask where bit `i` being 1 means the `i`th
      position is actually an ellipsis. One bit at most can be 1.
      If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
      is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
      implicitly creates as many range specifications as necessary to fully
      specify the sliced range for every dimension. For example for a 4-dimensional
      tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
    new_axis_mask: An optional `int`. Defaults to `0`.
      a bitmask where bit `i` being 1 means the `i`th
      specification creates a new shape 1 dimension. For example
      `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
      a bitmask where bit `i` implies that the `i`th
      specification should shrink the dimensionality. begin and end
      must imply a slice of size 1 in the dimension. For example in
      python one might do `foo[:, 3, :]` which would result in
      `shrink_axis_mask` being 2.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize each mask attribute to an int, then build the op.
    if begin_mask is None:
      begin_mask = 0
    begin_mask = _execute.make_int(begin_mask, "begin_mask")
    if end_mask is None:
      end_mask = 0
    end_mask = _execute.make_int(end_mask, "end_mask")
    if ellipsis_mask is None:
      ellipsis_mask = 0
    ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
    if new_axis_mask is None:
      new_axis_mask = 0
    new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
    if shrink_axis_mask is None:
      shrink_axis_mask = 0
    shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSlice", input=input, begin=begin, end=end, strides=strides,
        begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask,
        new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
              "begin_mask", _op.get_attr("begin_mask"), "end_mask",
              _op.get_attr("end_mask"), "ellipsis_mask",
              _op.get_attr("ellipsis_mask"), "new_axis_mask",
              _op.get_attr("new_axis_mask"), "shrink_axis_mask",
              _op.get_attr("shrink_axis_mask"))
    _execute.record_gradient(
      "StridedSlice", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "StridedSlice",
        name, _ctx._post_execution_callbacks, input, begin, end, strides,
        "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask",
        ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask",
        shrink_axis_mask)
      return _result
    except _core._FallbackException:
      return strided_slice_eager_fallback(
          input, begin, end, strides, begin_mask=begin_mask,
          end_mask=end_mask, ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8557 
8558 
def strided_slice_eager_fallback(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `strided_slice`."""
  _ctx = ctx if ctx else _context.context()
  # Normalize each mask attribute: treat None as 0, then coerce to int.
  begin_mask = _execute.make_int(0 if begin_mask is None else begin_mask,
                                 "begin_mask")
  end_mask = _execute.make_int(0 if end_mask is None else end_mask,
                               "end_mask")
  ellipsis_mask = _execute.make_int(
      0 if ellipsis_mask is None else ellipsis_mask, "ellipsis_mask")
  new_axis_mask = _execute.make_int(
      0 if new_axis_mask is None else new_axis_mask, "new_axis_mask")
  shrink_axis_mask = _execute.make_int(
      0 if shrink_axis_mask is None else shrink_axis_mask, "shrink_axis_mask")
  # Resolve the dtype attrs; begin/end/strides must share one index dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Index, (begin, end, strides) = _execute.args_to_matching_eager(
      [begin, end, strides], _ctx)
  flat_inputs = [input, begin, end, strides]
  op_attrs = ("T", _attr_T, "Index", _attr_Index,
              "begin_mask", begin_mask, "end_mask", end_mask,
              "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask,
              "shrink_axis_mask", shrink_axis_mask)
  results = _execute.execute(b"StridedSlice", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StridedSlice", flat_inputs, op_attrs, results, name)
  sliced, = results
  return sliced
8592 
8593 
def strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Assign `value` to the sliced l-value reference of `ref`.

  The values of `value` are assigned to the positions in the variable
  `ref` that are selected by the slice parameters. The slice parameters
  `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.

  NOTE this op currently does not support broadcasting and so `value`'s
  shape must be exactly the shape produced by the slice of `ref`.

  Args:
    ref: A mutable `Tensor`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    end: A `Tensor`. Must have the same type as `begin`.
    strides: A `Tensor`. Must have the same type as `begin`.
    value: A `Tensor`. Must have the same type as `ref`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op produces a ref output, which
      is only supported in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize each mask attribute to an int, then build the op.
    if begin_mask is None:
      begin_mask = 0
    begin_mask = _execute.make_int(begin_mask, "begin_mask")
    if end_mask is None:
      end_mask = 0
    end_mask = _execute.make_int(end_mask, "end_mask")
    if ellipsis_mask is None:
      ellipsis_mask = 0
    ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
    if new_axis_mask is None:
      new_axis_mask = 0
    new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
    if shrink_axis_mask is None:
      shrink_axis_mask = 0
    shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSliceAssign", ref=ref, begin=begin, end=end, strides=strides,
        value=value, begin_mask=begin_mask, end_mask=end_mask,
        ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask,
        shrink_axis_mask=shrink_axis_mask, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
              "begin_mask", _op.get_attr("begin_mask"), "end_mask",
              _op.get_attr("end_mask"), "ellipsis_mask",
              _op.get_attr("ellipsis_mask"), "new_axis_mask",
              _op.get_attr("new_axis_mask"), "shrink_axis_mask",
              _op.get_attr("shrink_axis_mask"))
    _execute.record_gradient(
      "StridedSliceAssign", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Refs are not supported in eager mode; there is no eager fallback.
    raise RuntimeError("strided_slice_assign op does not support eager execution. Arg 'output_ref' is a ref.")
  # NOTE(review): removed an identical, unreachable `raise` statement that
  # followed this if/else (both branches already return or raise).
8660 
def strided_slice_grad(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Returns the gradient of `StridedSlice`.

  Since `StridedSlice` cuts out pieces of its `input` which is size
  `shape`, its gradient will have the same shape (which is passed here
  as `shape`). The gradient will be zero in any element that the slice
  does not select.

  Arguments are the same as `StridedSlice` with the exception that
  `dy` is the input gradient to be propagated and `shape` is the
  shape of `StridedSlice`'s `input`.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    begin: A `Tensor`. Must have the same type as `shape`.
    end: A `Tensor`. Must have the same type as `shape`.
    strides: A `Tensor`. Must have the same type as `shape`.
    dy: A `Tensor`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `dy`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize each mask attribute to an int, then build the op.
    if begin_mask is None:
      begin_mask = 0
    begin_mask = _execute.make_int(begin_mask, "begin_mask")
    if end_mask is None:
      end_mask = 0
    end_mask = _execute.make_int(end_mask, "end_mask")
    if ellipsis_mask is None:
      ellipsis_mask = 0
    ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
    if new_axis_mask is None:
      new_axis_mask = 0
    new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
    if shrink_axis_mask is None:
      shrink_axis_mask = 0
    shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSliceGrad", shape=shape, begin=begin, end=end,
        strides=strides, dy=dy, begin_mask=begin_mask, end_mask=end_mask,
        ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask,
        shrink_axis_mask=shrink_axis_mask, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
              "begin_mask", _op.get_attr("begin_mask"), "end_mask",
              _op.get_attr("end_mask"), "ellipsis_mask",
              _op.get_attr("ellipsis_mask"), "new_axis_mask",
              _op.get_attr("new_axis_mask"), "shrink_axis_mask",
              _op.get_attr("shrink_axis_mask"))
    _execute.record_gradient(
      "StridedSliceGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "StridedSliceGrad", name, _ctx._post_execution_callbacks, shape,
        begin, end, strides, dy, "begin_mask", begin_mask, "end_mask",
        end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask",
        new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
      return _result
    except _core._FallbackException:
      return strided_slice_grad_eager_fallback(
          shape, begin, end, strides, dy, begin_mask=begin_mask,
          end_mask=end_mask, ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8745 
8746 
def strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `strided_slice_grad`."""
  _ctx = ctx if ctx else _context.context()
  # Normalize each mask attribute: treat None as 0, then coerce to int.
  begin_mask = _execute.make_int(0 if begin_mask is None else begin_mask,
                                 "begin_mask")
  end_mask = _execute.make_int(0 if end_mask is None else end_mask,
                               "end_mask")
  ellipsis_mask = _execute.make_int(
      0 if ellipsis_mask is None else ellipsis_mask, "ellipsis_mask")
  new_axis_mask = _execute.make_int(
      0 if new_axis_mask is None else new_axis_mask, "new_axis_mask")
  shrink_axis_mask = _execute.make_int(
      0 if shrink_axis_mask is None else shrink_axis_mask, "shrink_axis_mask")
  # Resolve the dtype attrs; shape/begin/end/strides share one index dtype.
  _attr_T, (dy,) = _execute.args_to_matching_eager([dy], _ctx)
  _attr_Index, (shape, begin, end, strides) = _execute.args_to_matching_eager(
      [shape, begin, end, strides], _ctx)
  flat_inputs = [shape, begin, end, strides, dy]
  op_attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
              "end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
              "new_axis_mask", new_axis_mask,
              "shrink_axis_mask", shrink_axis_mask)
  results = _execute.execute(b"StridedSliceGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StridedSliceGrad", flat_inputs, op_attrs, results, name)
  grad, = results
  return grad
8780 
8781 
@tf_export('tile', 'manip.tile')
@deprecated_endpoints('manip.tile')
def tile(input, multiples, name=None):
  r"""Constructs a tensor by tiling a given tensor.

  This operation creates a new tensor by replicating `input` `multiples`
  times. The output tensor's i'th dimension has
  `input.dims(i) * multiples[i]` elements, and the values of `input` are
  replicated `multiples[i]` times along the 'i'th dimension. For example,
  tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.

  Args:
    input: A `Tensor`. 1-D or higher.
    multiples: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D. Length must be the same as the number of dimensions in
      `input`
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  building_graph = _ctx is None or not _ctx._eager_context.is_eager
  if building_graph:
    # Graph mode: register a Tile op and record it for gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Tile", input=input, multiples=multiples, name=name)
    outputs = _op.outputs[:]
    op_attrs = ("T", _op.get_attr("T"), "Tmultiples",
                _op.get_attr("Tmultiples"))
    _execute.record_gradient(
        "Tile", _op.inputs, op_attrs, outputs, name)
    only_output, = outputs
    return only_output
  # Eager mode: attempt the C fast path, falling back to the Python slow
  # path when the fast path cannot handle the inputs.
  try:
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Tile", name,
        _ctx._post_execution_callbacks, input, multiples)
  except _core._FallbackException:
    return tile_eager_fallback(input, multiples, name=name, ctx=_ctx)
  except _core._NotOkStatusException as e:
    msg = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, msg), None)
8830 
8831 
def tile_eager_fallback(input, multiples, name=None, ctx=None):
  r"""Slow-path eager-mode implementation backing `tile`."""
  _ctx = ctx if ctx else _context.context()
  # Resolve dtype attrs; `multiples` defaults to int32 when ambiguous.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tmultiples, (multiples,) = _execute.args_to_matching_eager(
      [multiples], _ctx, _dtypes.int32)
  flat_inputs = [input, multiples]
  op_attrs = ("T", _attr_T, "Tmultiples", _attr_Tmultiples)
  results = _execute.execute(b"Tile", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Tile", flat_inputs, op_attrs, results, name)
  tiled, = results
  return tiled
8847 
8848 
def tile_grad(input, multiples, name=None):
  r"""Returns the gradient of `Tile`.

  Since `Tile` takes an input and repeats the input `multiples` times
  along each dimension, `TileGrad` takes in `multiples` and aggregates
  each repeated tile of `input` into `output`.

  Args:
    input: A `Tensor`.
    multiples: A `Tensor` of type `int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a TileGrad op and record it for gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TileGrad", input=input, multiples=multiples, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TileGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TileGrad",
        name, _ctx._post_execution_callbacks, input, multiples)
      return _result
    except _core._FallbackException:
      return tile_grad_eager_fallback(
          input, multiples, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8891 
8892 
def tile_grad_eager_fallback(input, multiples, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tile_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the T attr from the input's dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # multiples must be an int32 tensor.
  multiples = _ops.convert_to_tensor(multiples, _dtypes.int32)
  _inputs_flat = [input, multiples]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TileGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TileGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result  # single-output op
  return _result
8908 
8909 
def transpose(x, perm, name=None):
  r"""Shuffle dimensions of x according to a permutation.

  The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

  Args:
    x: A `Tensor`.
    perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: add a Transpose node and record inputs/attrs for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Transpose", x=x, perm=perm, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tperm", _op.get_attr("Tperm"))
    _execute.record_gradient(
      "Transpose", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Transpose",
        name, _ctx._post_execution_callbacks, x, perm)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return transpose_eager_fallback(
          x, perm, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8951 
8952 
def transpose_eager_fallback(x, perm, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function transpose
  """
  _ctx = ctx if ctx else _context.context()
  # Infer T from x; perm prefers int32 when its dtype is not already fixed.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], _ctx, _dtypes.int32)
  _inputs_flat = [x, perm]
  _attrs = ("T", _attr_T, "Tperm", _attr_Tperm)
  _result = _execute.execute(b"Transpose", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Transpose", _inputs_flat, _attrs, _result, name)
  _result, = _result  # single-output op
  return _result
8968 
8969 
# Structured (named) return value for the Unique op: (y, idx).
_unique_outputs = ["y", "idx"]
_UniqueOutput = _collections.namedtuple(
    "Unique", _unique_outputs)
8973 
8974 
def unique(x, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements of `x`
  sorted in the same order that they occur in `x`. This operation also returns a
  tensor `idx` the same size as `x` that contains the index of each value of `x`
  in the unique output `y`. In other words:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  For example:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx = unique(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  ```

  Args:
    x: A `Tensor`. 1-D.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  _ctx = _context._context
  # Graph mode: add a Unique node and record inputs/attrs for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_idx is None:
      out_idx = _dtypes.int32
    out_idx = _execute.make_type(out_idx, "out_idx")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Unique", x=x, out_idx=out_idx, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx"))
    _execute.record_gradient(
      "Unique", _inputs_flat, _attrs, _result, name)
    _result = _UniqueOutput._make(_result)  # wrap (y, idx) in a namedtuple
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Unique", name,
        _ctx._post_execution_callbacks, x, "out_idx", out_idx)
      _result = _UniqueOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unique_eager_fallback(
          x, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9036 
9037 
def unique_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unique
  """
  _ctx = ctx if ctx else _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  # Normalize out_idx to a DType enum for the attr list.
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T, "out_idx", out_idx)
  _result = _execute.execute(b"Unique", 2, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Unique", _inputs_flat, _attrs, _result, name)
  _result = _UniqueOutput._make(_result)  # wrap (y, idx) in a namedtuple
  return _result
9055 
9056 
# Structured (named) return value for the UniqueV2 op: (y, idx).
_unique_v2_outputs = ["y", "idx"]
_UniqueV2Output = _collections.namedtuple(
    "UniqueV2", _unique_v2_outputs)
9060 
9061 
def unique_v2(x, axis, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements along an axis of a tensor.

  This operation returns a tensor `y` containing unique elements
  along the `axis` of a tensor. The returned unique elements is sorted
  in the same order as they occur along `axis` in `x`.
  This operation also returns a tensor `idx` that is the same size as
  the number of the elements in `x` along the `axis` dimension. It
  contains the index in the unique output `y`.
  In other words, for an `1-D` tensor `x` with `axis = None`:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  For example:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx = unique(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  ```

  For an `2-D` tensor `x` with `axis = 0`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx = unique(x, axis=0)
  y ==> [[1, 0, 0],
         [2, 0, 0]]
  idx ==> [0, 0, 1]
  ```

  For an `2-D` tensor `x` with `axis = 1`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx = unique(x, axis=1)
  y ==> [[1, 0],
         [1, 0],
         [2, 0]]
  idx ==> [0, 1, 1]
  ```

  Args:
    x: A `Tensor`. A `Tensor`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: None). The axis of the Tensor to
      find the unique elements.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  _ctx = _context._context
  # Graph mode: add a UniqueV2 node and record inputs/attrs for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_idx is None:
      out_idx = _dtypes.int32
    out_idx = _execute.make_type(out_idx, "out_idx")
    _, _, _op = _op_def_lib._apply_op_helper(
        "UniqueV2", x=x, axis=axis, out_idx=out_idx, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Taxis", _op.get_attr("Taxis"),
              "out_idx", _op.get_attr("out_idx"))
    _execute.record_gradient(
      "UniqueV2", _inputs_flat, _attrs, _result, name)
    _result = _UniqueV2Output._make(_result)  # wrap (y, idx) in a namedtuple
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "UniqueV2",
        name, _ctx._post_execution_callbacks, x, axis, "out_idx", out_idx)
      _result = _UniqueV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unique_v2_eager_fallback(
          x, axis, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9155 
9156 
def unique_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unique_v2
  """
  _ctx = ctx if ctx else _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  # Normalize out_idx to a DType enum for the attr list.
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  # Preferred dtype for axis is int64 here (not int32).
  _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64)
  _inputs_flat = [x, axis]
  _attrs = ("T", _attr_T, "Taxis", _attr_Taxis, "out_idx", out_idx)
  _result = _execute.execute(b"UniqueV2", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UniqueV2", _inputs_flat, _attrs, _result, name)
  _result = _UniqueV2Output._make(_result)  # wrap (y, idx) in a namedtuple
  return _result
9175 
9176 
# Structured (named) return value for the UniqueWithCounts op: (y, idx, count).
_unique_with_counts_outputs = ["y", "idx", "count"]
_UniqueWithCountsOutput = _collections.namedtuple(
    "UniqueWithCounts", _unique_with_counts_outputs)
9180 
9181 
def unique_with_counts(x, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements of `x`
  sorted in the same order that they occur in `x`. This operation also returns a
  tensor `idx` the same size as `x` that contains the index of each value of `x`
  in the unique output `y`. Finally, it returns a third tensor `count` that
  contains the count of each element of `y` in `x`. In other words:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  For example:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx, count = unique_with_counts(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  count ==> [2, 1, 3, 1, 2]
  ```

  Args:
    x: A `Tensor`. 1-D.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx, count).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
    count: A `Tensor` of type `out_idx`.
  """
  _ctx = _context._context
  # Graph mode: add a UniqueWithCounts node and record inputs/attrs for
  # gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_idx is None:
      out_idx = _dtypes.int32
    out_idx = _execute.make_type(out_idx, "out_idx")
    _, _, _op = _op_def_lib._apply_op_helper(
        "UniqueWithCounts", x=x, out_idx=out_idx, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx"))
    _execute.record_gradient(
      "UniqueWithCounts", _inputs_flat, _attrs, _result, name)
    _result = _UniqueWithCountsOutput._make(_result)  # (y, idx, count)
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "UniqueWithCounts", name, _ctx._post_execution_callbacks, x,
        "out_idx", out_idx)
      _result = _UniqueWithCountsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unique_with_counts_eager_fallback(
          x, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9247 
9248 
def unique_with_counts_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unique_with_counts
  """
  _ctx = ctx if ctx else _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  # Normalize out_idx to a DType enum for the attr list.
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T, "out_idx", out_idx)
  _result = _execute.execute(b"UniqueWithCounts", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UniqueWithCounts", _inputs_flat, _attrs, _result, name)
  _result = _UniqueWithCountsOutput._make(_result)  # (y, idx, count)
  return _result
9266 
9267 
# Structured (named) return value for the UniqueWithCountsV2 op:
# (y, idx, count).
_unique_with_counts_v2_outputs = ["y", "idx", "count"]
_UniqueWithCountsV2Output = _collections.namedtuple(
    "UniqueWithCountsV2", _unique_with_counts_v2_outputs)
9271 
9272 
def unique_with_counts_v2(x, axis, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements along an axis of a tensor.

  This operation returns a tensor `y` containing unique elements
  along the `axis` of a tensor. The returned unique elements is sorted
  in the same order as they occur along `axis` in `x`.
  This operation also returns a tensor `idx` and a tensor `count`
  that are the same size as the number of the elements in `x` along the
  `axis` dimension. The `idx` contains the index in the unique output `y`
  and the `count` contains the count in the unique output `y`.
  In other words, for an `1-D` tensor `x` with `axis = None`:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  For example:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx, count = unique_with_counts(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  count ==> [2, 1, 3, 1, 2]
  ```

  For an `2-D` tensor `x` with `axis = 0`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx, count = unique_with_counts(x, axis=0)
  y ==> [[1, 0, 0],
         [2, 0, 0]]
  idx ==> [0, 0, 1]
  count ==> [2, 1]
  ```

  For an `2-D` tensor `x` with `axis = 1`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx, count = unique_with_counts(x, axis=1)
  y ==> [[1, 0],
         [1, 0],
         [2, 0]]
  idx ==> [0, 1, 1]
  count ==> [1, 2]
  ```

  Args:
    x: A `Tensor`. A `Tensor`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: None). The axis of the Tensor to
      find the unique elements.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx, count).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
    count: A `Tensor` of type `out_idx`.
  """
  _ctx = _context._context
  # Graph mode: add a UniqueWithCountsV2 node and record inputs/attrs for
  # gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_idx is None:
      out_idx = _dtypes.int32
    out_idx = _execute.make_type(out_idx, "out_idx")
    _, _, _op = _op_def_lib._apply_op_helper(
        "UniqueWithCountsV2", x=x, axis=axis, out_idx=out_idx, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Taxis", _op.get_attr("Taxis"),
              "out_idx", _op.get_attr("out_idx"))
    _execute.record_gradient(
      "UniqueWithCountsV2", _inputs_flat, _attrs, _result, name)
    _result = _UniqueWithCountsV2Output._make(_result)  # (y, idx, count)
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "UniqueWithCountsV2", name, _ctx._post_execution_callbacks, x, axis,
        "out_idx", out_idx)
      _result = _UniqueWithCountsV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unique_with_counts_v2_eager_fallback(
          x, axis, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9372 
9373 
def unique_with_counts_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unique_with_counts_v2
  """
  _ctx = ctx if ctx else _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  # Normalize out_idx to a DType enum for the attr list.
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  # Preferred dtype for axis is int64 here (not int32).
  _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64)
  _inputs_flat = [x, axis]
  _attrs = ("T", _attr_T, "Taxis", _attr_Taxis, "out_idx", out_idx)
  _result = _execute.execute(b"UniqueWithCountsV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UniqueWithCountsV2", _inputs_flat, _attrs, _result, name)
  _result = _UniqueWithCountsV2Output._make(_result)  # (y, idx, count)
  return _result
9392 
9393 
def unpack(value, num, axis=0, name=None):
  r"""Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.

  Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
  For example, given a tensor of shape `(A, B, C, D)`;

  If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
    and each tensor in `output` will have shape `(B, C, D)`. (Note that the
    dimension unpacked along is gone, unlike `split`).

  If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
    and each tensor in `output` will have shape `(A, C, D)`.
  Etc.

  This is the opposite of `pack`.

  Args:
    value: A `Tensor`.
      1-D or higher, with `axis` dimension size equal to `num`.
    num: An `int` that is `>= 0`.
    axis: An optional `int`. Defaults to `0`.
      Dimension along which to unpack.  Negative values wrap around, so the
      valid range is `[-R, R)`.
    name: A name for the operation (optional).

  Returns:
    A list of `num` `Tensor` objects with the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode: add an Unpack node and record inputs/attrs for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    num = _execute.make_int(num, "num")
    if axis is None:
      axis = 0
    axis = _execute.make_int(axis, "axis")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Unpack", value=value, num=num, axis=axis, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("num", _op.get_attr("num"), "T", _op.get_attr("T"), "axis",
              _op.get_attr("axis"))
    _execute.record_gradient(
      "Unpack", _inputs_flat, _attrs, _result, name)
    # Unpack has `num` outputs; return the full list (no single-result
    # unwrapping here).
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Unpack", name,
        _ctx._post_execution_callbacks, value, "num", num, "axis", axis)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unpack_eager_fallback(
          value, num=num, axis=axis, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9453 
9454 
def unpack_eager_fallback(value, num, axis=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unpack
  """
  _ctx = ctx if ctx else _context.context()
  num = _execute.make_int(num, "num")
  if axis is None:
    axis = 0
  axis = _execute.make_int(axis, "axis")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _inputs_flat = [value]
  _attrs = ("num", num, "T", _attr_T, "axis", axis)
  # The op produces `num` outputs, hence num is passed as the output count.
  _result = _execute.execute(b"Unpack", num, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Unpack", _inputs_flat, _attrs, _result, name)
  return _result
9472 
9473 
@tf_export('unravel_index')
def unravel_index(indices, dims, name=None):
  r"""Converts a flat index or array of flat indices into a tuple of

  coordinate arrays.

  @compatibility(numpy)
  Equivalent to np.unravel_index
  @end_compatibility

  Args:
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An 0-D or 1-D `int` Tensor whose elements are indices into the
      flattened version of an array of dimensions dims.
    dims: A `Tensor`. Must have the same type as `indices`.
      An 1-D `int` Tensor. The shape of the array to use for unraveling
      indices.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `indices`.
  """
  _ctx = _context._context
  # Graph mode: add an UnravelIndex node and record inputs/attrs for
  # gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "UnravelIndex", indices=indices, dims=dims, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "UnravelIndex", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "UnravelIndex",
        name, _ctx._post_execution_callbacks, indices, dims)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return unravel_index_eager_fallback(
          indices, dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9523 
9524 
def unravel_index_eager_fallback(indices, dims, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unravel_index
  """
  _ctx = ctx if ctx else _context.context()
  # indices and dims must share one dtype (Tidx); prefer int32.
  _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([indices, dims], _ctx, _dtypes.int32)
  (indices, dims) = _inputs_Tidx
  _inputs_flat = [indices, dims]
  _attrs = ("Tidx", _attr_Tidx)
  _result = _execute.execute(b"UnravelIndex", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnravelIndex", _inputs_flat, _attrs, _result, name)
  _result, = _result  # single-output op
  return _result
9540 
9541 
def upper_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None):
  r"""Applies upper_bound(sorted_search_values, values) along each row.

  Each set of rows with the same index in (sorted_inputs, values) is treated
  independently.  The resulting row is the equivalent of calling
  `np.searchsorted(sorted_inputs, values, side='right')`.

  The result is not a global index to the entire 
  `Tensor`, but rather just the index in the last dimension.

  A 2-D example:
    sorted_sequence = [[0, 3, 9, 9, 10],
                       [1, 2, 3, 4, 5]]
    values = [[2, 4, 9],
              [0, 2, 6]]

    result = UpperBound(sorted_sequence, values)

    result == [[1, 2, 4],
               [0, 2, 5]]

  Args:
    sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered.
    values: A `Tensor`. Must have the same type as `sorted_inputs`.
      2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
      the values that will be searched for in `sorted_search_values`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context
  # Graph mode: add an UpperBound node and record inputs/attrs for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_type is None:
      out_type = _dtypes.int32
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "UpperBound", sorted_inputs=sorted_inputs, values=values,
        out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type"))
    _execute.record_gradient(
      "UpperBound", _inputs_flat, _attrs, _result, name)
    _result, = _result  # single-output op
    return _result

  else:
    # Eager mode: try the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "UpperBound",
        name, _ctx._post_execution_callbacks, sorted_inputs, values,
        "out_type", out_type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return upper_bound_eager_fallback(
          sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface C++ op failures as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
9606 
9607 
def upper_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function upper_bound
  """
  _ctx = ctx if ctx else _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  # Normalize out_type to a DType enum for the attr list.
  out_type = _execute.make_type(out_type, "out_type")
  # sorted_inputs and values must share one dtype (T).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], _ctx)
  (sorted_inputs, values) = _inputs_T
  _inputs_flat = [sorted_inputs, values]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"UpperBound", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UpperBound", _inputs_flat, _attrs, _result, name)
  _result, = _result  # single-output op
  return _result
9626 
9627 
9628 def where(condition, name=None):
9629   r"""Returns locations of nonzero / true values in a tensor.
9630 
9631   This operation returns the coordinates of true elements in `condition`. The
9632   coordinates are returned in a 2-D tensor where the first dimension (rows)
9633   represents the number of true elements, and the second dimension (columns)
9634   represents the coordinates of the true elements. Keep in mind, the shape of
9635   the output tensor can vary depending on how many true values there are in
9636   `condition`. Indices are output in row-major order.
9637 
9638   For example:
9639 
9640   ```
9641   # 'input' tensor is [[True, False]
9642   #                    [True, False]]
9643   # 'input' has two true values, so output has two coordinates.
9644   # 'input' has rank of 2, so coordinates have two indices.
9645   where(input) ==> [[0, 0],
9646                     [1, 0]]
9647 
9648   # `condition` tensor is [[[True, False]
9649   #                     [True, False]]
9650   #                    [[False, True]
9651   #                     [False, True]]
9652   #                    [[False, False]
9653   #                     [False, True]]]
9654   # 'input' has 5 true values, so output has 5 coordinates.
9655   # 'input' has rank of 3, so coordinates have three indices.
9656   where(input) ==> [[0, 0, 0],
9657                     [0, 1, 0],
9658                     [1, 0, 1],
9659                     [1, 1, 1],
9660                     [2, 1, 1]]
9661 
9662   # `condition` tensor is [[[1.5,  0.0]
9663   #                     [-0.5, 0.0]]
9664   #                    [[0.0,  0.25]
9665   #                     [0.0,  0.75]]
9666   #                    [[0.0,  0.0]
9667   #                     [0.0,  0.01]]]
9668   # 'input' has 5 nonzero values, so output has 5 coordinates.
9669   # 'input' has rank of 3, so coordinates have three indices.
9670   where(input) ==> [[0, 0, 0],
9671                     [0, 1, 0],
9672                     [1, 0, 1],
9673                     [1, 1, 1],
9674                     [2, 1, 1]]
9675 
9676   # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
9677   #                     [0.0 + 0.5j, 0.0  + 0.0j]]
9678   #                    [[0.0 + 0.0j, 0.25 + 1.5j]
9679   #                     [0.0 + 0.0j, 0.75 + 0.0j]]
9680   #                    [[0.0 + 0.0j, 0.0  + 0.0j]
9681   #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
9682   # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
9683   # 'input' has rank of 3, so coordinates have three indices.
9684   where(input) ==> [[0, 0, 0],
9685                     [0, 1, 0],
9686                     [1, 0, 1],
9687                     [1, 1, 1],
9688                     [2, 1, 1]]
9689   ```
9690 
9691   Args:
9692     condition: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
9693     name: A name for the operation (optional).
9694 
9695   Returns:
9696     A `Tensor` of type `int64`.
9697   """
9698   _ctx = _context._context
9699   if _ctx is None or not _ctx._eager_context.is_eager:
9700     _, _, _op = _op_def_lib._apply_op_helper(
9701         "Where", input=condition, name=name)
9702     _result = _op.outputs[:]
9703     _inputs_flat = _op.inputs
9704     _attrs = ("T", _op.get_attr("T"))
9705     _execute.record_gradient(
9706       "Where", _inputs_flat, _attrs, _result, name)
9707     _result, = _result
9708     return _result
9709 
9710   else:
9711     try:
9712       _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
9713         _ctx._context_handle, _ctx._eager_context.device_name, "Where", name,
9714         _ctx._post_execution_callbacks, condition)
9715       return _result
9716     except _core._FallbackException:
9717       return where_eager_fallback(
9718           condition, name=name, ctx=_ctx)
9719     except _core._NotOkStatusException as e:
9720       if name is not None:
9721         message = e.message + " name: " + name
9722       else:
9723         message = e.message
9724       _six.raise_from(_core._status_to_exception(e.code, message), None)
9725 
9726 
def where_eager_fallback(condition, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function where
  """
  # Resolve the eager context, defaulting to the current one.
  eager_ctx = ctx or _context.context()
  # Infer the "T" attr from the input, defaulting to bool for Where.
  attr_t, (condition,) = _execute.args_to_matching_eager(
      [condition], eager_ctx, _dtypes.bool)
  flat_inputs = [condition]
  op_attrs = ("T", attr_t)
  # Execute the op via the generic eager execute path (single output).
  outputs = _execute.execute(b"Where", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Where", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
9741 
9742 
def zeros_like(x, name=None):
  r"""Returns a tensor of zeros with the same shape and type as x.

  Args:
    x: A `Tensor`. a tensor of type T.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: attempt the fast C++ execution path first.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "ZerosLike",
          name, _ctx._post_execution_callbacks, x)
    except _core._FallbackException:
      # Fast path rejected the inputs; use the generic eager slow path.
      return zeros_like_eager_fallback(x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)

  # Graph mode: record a ZerosLike node in the current graph.
  _, _, op = _op_def_lib._apply_op_helper("ZerosLike", x=x, name=name)
  outputs = op.outputs[:]
  op_attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("ZerosLike", op.inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
9780 
9781 
def zeros_like_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function zeros_like
  """
  # Resolve the eager context, defaulting to the current one.
  eager_ctx = ctx or _context.context()
  # Infer the "T" attr from the input tensor.
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  # Execute the op via the generic eager execute path (single output).
  outputs = _execute.execute(b"ZerosLike", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("ZerosLike", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
9796 
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList, register it, and return an OpDefLibrary.

  Args:
    op_list_proto_bytes: Serialized `OpList` protocol buffer bytes.

  Returns:
    An `OpDefLibrary` populated with the ops from the proto.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  # Make the ops known to the global registry before building the library.
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
9804 # op {
9805 #   name: "BatchMatrixBandPart"
9806 #   input_arg {
9807 #     name: "input"
9808 #     type_attr: "T"
9809 #   }
9810 #   input_arg {
9811 #     name: "num_lower"
9812 #     type: DT_INT64
9813 #   }
9814 #   input_arg {
9815 #     name: "num_upper"
9816 #     type: DT_INT64
9817 #   }
9818 #   output_arg {
9819 #     name: "band"
9820 #     type_attr: "T"
9821 #   }
9822 #   attr {
9823 #     name: "T"
9824 #     type: "type"
9825 #   }
9826 #   deprecation {
9827 #     version: 14
9828 #     explanation: "Use MatrixBandPart"
9829 #   }
9830 # }
9831 # op {
9832 #   name: "BatchMatrixDiag"
9833 #   input_arg {
9834 #     name: "diagonal"
9835 #     type_attr: "T"
9836 #   }
9837 #   output_arg {
9838 #     name: "output"
9839 #     type_attr: "T"
9840 #   }
9841 #   attr {
9842 #     name: "T"
9843 #     type: "type"
9844 #   }
9845 #   deprecation {
9846 #     version: 14
9847 #     explanation: "Use MatrixDiag"
9848 #   }
9849 # }
9850 # op {
9851 #   name: "BatchMatrixDiagPart"
9852 #   input_arg {
9853 #     name: "input"
9854 #     type_attr: "T"
9855 #   }
9856 #   output_arg {
9857 #     name: "diagonal"
9858 #     type_attr: "T"
9859 #   }
9860 #   attr {
9861 #     name: "T"
9862 #     type: "type"
9863 #   }
9864 #   deprecation {
9865 #     version: 14
9866 #     explanation: "Use MatrixDiagPart"
9867 #   }
9868 # }
9869 # op {
9870 #   name: "BatchMatrixSetDiag"
9871 #   input_arg {
9872 #     name: "input"
9873 #     type_attr: "T"
9874 #   }
9875 #   input_arg {
9876 #     name: "diagonal"
9877 #     type_attr: "T"
9878 #   }
9879 #   output_arg {
9880 #     name: "output"
9881 #     type_attr: "T"
9882 #   }
9883 #   attr {
9884 #     name: "T"
9885 #     type: "type"
9886 #   }
9887 #   deprecation {
9888 #     version: 14
9889 #     explanation: "Use MatrixSetDiag"
9890 #   }
9891 # }
9892 # op {
9893 #   name: "BatchToSpace"
9894 #   input_arg {
9895 #     name: "input"
9896 #     type_attr: "T"
9897 #   }
9898 #   input_arg {
9899 #     name: "crops"
9900 #     type_attr: "Tidx"
9901 #   }
9902 #   output_arg {
9903 #     name: "output"
9904 #     type_attr: "T"
9905 #   }
9906 #   attr {
9907 #     name: "T"
9908 #     type: "type"
9909 #   }
9910 #   attr {
9911 #     name: "block_size"
9912 #     type: "int"
9913 #     has_minimum: true
9914 #     minimum: 2
9915 #   }
9916 #   attr {
9917 #     name: "Tidx"
9918 #     type: "type"
9919 #     default_value {
9920 #       type: DT_INT32
9921 #     }
9922 #     allowed_values {
9923 #       list {
9924 #         type: DT_INT32
9925 #         type: DT_INT64
9926 #       }
9927 #     }
9928 #   }
9929 # }
9930 # op {
9931 #   name: "BatchToSpaceND"
9932 #   input_arg {
9933 #     name: "input"
9934 #     type_attr: "T"
9935 #   }
9936 #   input_arg {
9937 #     name: "block_shape"
9938 #     type_attr: "Tblock_shape"
9939 #   }
9940 #   input_arg {
9941 #     name: "crops"
9942 #     type_attr: "Tcrops"
9943 #   }
9944 #   output_arg {
9945 #     name: "output"
9946 #     type_attr: "T"
9947 #   }
9948 #   attr {
9949 #     name: "T"
9950 #     type: "type"
9951 #   }
9952 #   attr {
9953 #     name: "Tblock_shape"
9954 #     type: "type"
9955 #     default_value {
9956 #       type: DT_INT32
9957 #     }
9958 #     allowed_values {
9959 #       list {
9960 #         type: DT_INT32
9961 #         type: DT_INT64
9962 #       }
9963 #     }
9964 #   }
9965 #   attr {
9966 #     name: "Tcrops"
9967 #     type: "type"
9968 #     default_value {
9969 #       type: DT_INT32
9970 #     }
9971 #     allowed_values {
9972 #       list {
9973 #         type: DT_INT32
9974 #         type: DT_INT64
9975 #       }
9976 #     }
9977 #   }
9978 # }
9979 # op {
9980 #   name: "Bitcast"
9981 #   input_arg {
9982 #     name: "input"
9983 #     type_attr: "T"
9984 #   }
9985 #   output_arg {
9986 #     name: "output"
9987 #     type_attr: "type"
9988 #   }
9989 #   attr {
9990 #     name: "T"
9991 #     type: "type"
9992 #     allowed_values {
9993 #       list {
9994 #         type: DT_BFLOAT16
9995 #         type: DT_HALF
9996 #         type: DT_FLOAT
9997 #         type: DT_DOUBLE
9998 #         type: DT_INT64
9999 #         type: DT_INT32
10000 #         type: DT_UINT8
10001 #         type: DT_UINT16
10002 #         type: DT_UINT32
10003 #         type: DT_UINT64
10004 #         type: DT_INT8
10005 #         type: DT_INT16
10006 #         type: DT_COMPLEX64
10007 #         type: DT_COMPLEX128
10008 #         type: DT_QINT8
10009 #         type: DT_QUINT8
10010 #         type: DT_QINT16
10011 #         type: DT_QUINT16
10012 #         type: DT_QINT32
10013 #       }
10014 #     }
10015 #   }
10016 #   attr {
10017 #     name: "type"
10018 #     type: "type"
10019 #     allowed_values {
10020 #       list {
10021 #         type: DT_BFLOAT16
10022 #         type: DT_HALF
10023 #         type: DT_FLOAT
10024 #         type: DT_DOUBLE
10025 #         type: DT_INT64
10026 #         type: DT_INT32
10027 #         type: DT_UINT8
10028 #         type: DT_UINT16
10029 #         type: DT_UINT32
10030 #         type: DT_UINT64
10031 #         type: DT_INT8
10032 #         type: DT_INT16
10033 #         type: DT_COMPLEX64
10034 #         type: DT_COMPLEX128
10035 #         type: DT_QINT8
10036 #         type: DT_QUINT8
10037 #         type: DT_QINT16
10038 #         type: DT_QUINT16
10039 #         type: DT_QINT32
10040 #       }
10041 #     }
10042 #   }
10043 # }
10044 # op {
10045 #   name: "BroadcastArgs"
10046 #   input_arg {
10047 #     name: "s0"
10048 #     type_attr: "T"
10049 #   }
10050 #   input_arg {
10051 #     name: "s1"
10052 #     type_attr: "T"
10053 #   }
10054 #   output_arg {
10055 #     name: "r0"
10056 #     type_attr: "T"
10057 #   }
10058 #   attr {
10059 #     name: "T"
10060 #     type: "type"
10061 #     default_value {
10062 #       type: DT_INT32
10063 #     }
10064 #     allowed_values {
10065 #       list {
10066 #         type: DT_INT32
10067 #         type: DT_INT64
10068 #       }
10069 #     }
10070 #   }
10071 # }
10072 # op {
10073 #   name: "BroadcastGradientArgs"
10074 #   input_arg {
10075 #     name: "s0"
10076 #     type_attr: "T"
10077 #   }
10078 #   input_arg {
10079 #     name: "s1"
10080 #     type_attr: "T"
10081 #   }
10082 #   output_arg {
10083 #     name: "r0"
10084 #     type_attr: "T"
10085 #   }
10086 #   output_arg {
10087 #     name: "r1"
10088 #     type_attr: "T"
10089 #   }
10090 #   attr {
10091 #     name: "T"
10092 #     type: "type"
10093 #     default_value {
10094 #       type: DT_INT32
10095 #     }
10096 #     allowed_values {
10097 #       list {
10098 #         type: DT_INT32
10099 #         type: DT_INT64
10100 #       }
10101 #     }
10102 #   }
10103 # }
10104 # op {
10105 #   name: "BroadcastTo"
10106 #   input_arg {
10107 #     name: "input"
10108 #     type_attr: "T"
10109 #   }
10110 #   input_arg {
10111 #     name: "shape"
10112 #     type_attr: "Tidx"
10113 #   }
10114 #   output_arg {
10115 #     name: "output"
10116 #     type_attr: "T"
10117 #   }
10118 #   attr {
10119 #     name: "T"
10120 #     type: "type"
10121 #   }
10122 #   attr {
10123 #     name: "Tidx"
10124 #     type: "type"
10125 #     default_value {
10126 #       type: DT_INT32
10127 #     }
10128 #     allowed_values {
10129 #       list {
10130 #         type: DT_INT32
10131 #         type: DT_INT64
10132 #       }
10133 #     }
10134 #   }
10135 # }
10136 # op {
10137 #   name: "CheckNumerics"
10138 #   input_arg {
10139 #     name: "tensor"
10140 #     type_attr: "T"
10141 #   }
10142 #   output_arg {
10143 #     name: "output"
10144 #     type_attr: "T"
10145 #   }
10146 #   attr {
10147 #     name: "T"
10148 #     type: "type"
10149 #     allowed_values {
10150 #       list {
10151 #         type: DT_BFLOAT16
10152 #         type: DT_HALF
10153 #         type: DT_FLOAT
10154 #         type: DT_DOUBLE
10155 #       }
10156 #     }
10157 #   }
10158 #   attr {
10159 #     name: "message"
10160 #     type: "string"
10161 #   }
10162 # }
10163 # op {
10164 #   name: "Concat"
10165 #   input_arg {
10166 #     name: "concat_dim"
10167 #     type: DT_INT32
10168 #   }
10169 #   input_arg {
10170 #     name: "values"
10171 #     type_attr: "T"
10172 #     number_attr: "N"
10173 #   }
10174 #   output_arg {
10175 #     name: "output"
10176 #     type_attr: "T"
10177 #   }
10178 #   attr {
10179 #     name: "N"
10180 #     type: "int"
10181 #     has_minimum: true
10182 #     minimum: 2
10183 #   }
10184 #   attr {
10185 #     name: "T"
10186 #     type: "type"
10187 #   }
10188 # }
10189 # op {
10190 #   name: "ConcatOffset"
10191 #   input_arg {
10192 #     name: "concat_dim"
10193 #     type: DT_INT32
10194 #   }
10195 #   input_arg {
10196 #     name: "shape"
10197 #     type: DT_INT32
10198 #     number_attr: "N"
10199 #   }
10200 #   output_arg {
10201 #     name: "offset"
10202 #     type: DT_INT32
10203 #     number_attr: "N"
10204 #   }
10205 #   attr {
10206 #     name: "N"
10207 #     type: "int"
10208 #     has_minimum: true
10209 #     minimum: 2
10210 #   }
10211 # }
10212 # op {
10213 #   name: "ConcatV2"
10214 #   input_arg {
10215 #     name: "values"
10216 #     type_attr: "T"
10217 #     number_attr: "N"
10218 #   }
10219 #   input_arg {
10220 #     name: "axis"
10221 #     type_attr: "Tidx"
10222 #   }
10223 #   output_arg {
10224 #     name: "output"
10225 #     type_attr: "T"
10226 #   }
10227 #   attr {
10228 #     name: "N"
10229 #     type: "int"
10230 #     has_minimum: true
10231 #     minimum: 2
10232 #   }
10233 #   attr {
10234 #     name: "T"
10235 #     type: "type"
10236 #   }
10237 #   attr {
10238 #     name: "Tidx"
10239 #     type: "type"
10240 #     default_value {
10241 #       type: DT_INT32
10242 #     }
10243 #     allowed_values {
10244 #       list {
10245 #         type: DT_INT32
10246 #         type: DT_INT64
10247 #       }
10248 #     }
10249 #   }
10250 # }
10251 # op {
10252 #   name: "ConjugateTranspose"
10253 #   input_arg {
10254 #     name: "x"
10255 #     type_attr: "T"
10256 #   }
10257 #   input_arg {
10258 #     name: "perm"
10259 #     type_attr: "Tperm"
10260 #   }
10261 #   output_arg {
10262 #     name: "y"
10263 #     type_attr: "T"
10264 #   }
10265 #   attr {
10266 #     name: "T"
10267 #     type: "type"
10268 #   }
10269 #   attr {
10270 #     name: "Tperm"
10271 #     type: "type"
10272 #     default_value {
10273 #       type: DT_INT32
10274 #     }
10275 #     allowed_values {
10276 #       list {
10277 #         type: DT_INT32
10278 #         type: DT_INT64
10279 #       }
10280 #     }
10281 #   }
10282 # }
10283 # op {
10284 #   name: "Const"
10285 #   output_arg {
10286 #     name: "output"
10287 #     type_attr: "dtype"
10288 #   }
10289 #   attr {
10290 #     name: "value"
10291 #     type: "tensor"
10292 #   }
10293 #   attr {
10294 #     name: "dtype"
10295 #     type: "type"
10296 #   }
10297 # }
10298 # op {
10299 #   name: "DebugGradientIdentity"
10300 #   input_arg {
10301 #     name: "input"
10302 #     type_attr: "T"
10303 #   }
10304 #   output_arg {
10305 #     name: "output"
10306 #     type_attr: "T"
10307 #   }
10308 #   attr {
10309 #     name: "T"
10310 #     type: "type"
10311 #   }
10312 #   allows_uninitialized_input: true
10313 # }
10314 # op {
10315 #   name: "DebugGradientRefIdentity"
10316 #   input_arg {
10317 #     name: "input"
10318 #     type_attr: "T"
10319 #     is_ref: true
10320 #   }
10321 #   output_arg {
10322 #     name: "output"
10323 #     type_attr: "T"
10324 #     is_ref: true
10325 #   }
10326 #   attr {
10327 #     name: "T"
10328 #     type: "type"
10329 #   }
10330 #   allows_uninitialized_input: true
10331 # }
10332 # op {
10333 #   name: "DeepCopy"
10334 #   input_arg {
10335 #     name: "x"
10336 #     type_attr: "T"
10337 #   }
10338 #   output_arg {
10339 #     name: "y"
10340 #     type_attr: "T"
10341 #   }
10342 #   attr {
10343 #     name: "T"
10344 #     type: "type"
10345 #   }
10346 #   is_stateful: true
10347 # }
10348 # op {
10349 #   name: "DepthToSpace"
10350 #   input_arg {
10351 #     name: "input"
10352 #     type_attr: "T"
10353 #   }
10354 #   output_arg {
10355 #     name: "output"
10356 #     type_attr: "T"
10357 #   }
10358 #   attr {
10359 #     name: "T"
10360 #     type: "type"
10361 #   }
10362 #   attr {
10363 #     name: "block_size"
10364 #     type: "int"
10365 #     has_minimum: true
10366 #     minimum: 2
10367 #   }
10368 #   attr {
10369 #     name: "data_format"
10370 #     type: "string"
10371 #     default_value {
10372 #       s: "NHWC"
10373 #     }
10374 #     allowed_values {
10375 #       list {
10376 #         s: "NHWC"
10377 #         s: "NCHW"
10378 #         s: "NCHW_VECT_C"
10379 #       }
10380 #     }
10381 #   }
10382 # }
10383 # op {
10384 #   name: "Dequantize"
10385 #   input_arg {
10386 #     name: "input"
10387 #     type_attr: "T"
10388 #   }
10389 #   input_arg {
10390 #     name: "min_range"
10391 #     type: DT_FLOAT
10392 #   }
10393 #   input_arg {
10394 #     name: "max_range"
10395 #     type: DT_FLOAT
10396 #   }
10397 #   output_arg {
10398 #     name: "output"
10399 #     type: DT_FLOAT
10400 #   }
10401 #   attr {
10402 #     name: "T"
10403 #     type: "type"
10404 #     allowed_values {
10405 #       list {
10406 #         type: DT_QINT8
10407 #         type: DT_QUINT8
10408 #         type: DT_QINT32
10409 #         type: DT_QINT16
10410 #         type: DT_QUINT16
10411 #       }
10412 #     }
10413 #   }
10414 #   attr {
10415 #     name: "mode"
10416 #     type: "string"
10417 #     default_value {
10418 #       s: "MIN_COMBINED"
10419 #     }
10420 #     allowed_values {
10421 #       list {
10422 #         s: "MIN_COMBINED"
10423 #         s: "MIN_FIRST"
10424 #         s: "SCALED"
10425 #       }
10426 #     }
10427 #   }
10428 # }
10429 # op {
10430 #   name: "Diag"
10431 #   input_arg {
10432 #     name: "diagonal"
10433 #     type_attr: "T"
10434 #   }
10435 #   output_arg {
10436 #     name: "output"
10437 #     type_attr: "T"
10438 #   }
10439 #   attr {
10440 #     name: "T"
10441 #     type: "type"
10442 #     allowed_values {
10443 #       list {
10444 #         type: DT_BFLOAT16
10445 #         type: DT_HALF
10446 #         type: DT_FLOAT
10447 #         type: DT_DOUBLE
10448 #         type: DT_INT32
10449 #         type: DT_INT64
10450 #         type: DT_COMPLEX64
10451 #         type: DT_COMPLEX128
10452 #       }
10453 #     }
10454 #   }
10455 # }
10456 # op {
10457 #   name: "DiagPart"
10458 #   input_arg {
10459 #     name: "input"
10460 #     type_attr: "T"
10461 #   }
10462 #   output_arg {
10463 #     name: "diagonal"
10464 #     type_attr: "T"
10465 #   }
10466 #   attr {
10467 #     name: "T"
10468 #     type: "type"
10469 #     allowed_values {
10470 #       list {
10471 #         type: DT_BFLOAT16
10472 #         type: DT_HALF
10473 #         type: DT_FLOAT
10474 #         type: DT_DOUBLE
10475 #         type: DT_INT32
10476 #         type: DT_INT64
10477 #         type: DT_COMPLEX64
10478 #         type: DT_COMPLEX128
10479 #       }
10480 #     }
10481 #   }
10482 # }
10483 # op {
10484 #   name: "EditDistance"
10485 #   input_arg {
10486 #     name: "hypothesis_indices"
10487 #     type: DT_INT64
10488 #   }
10489 #   input_arg {
10490 #     name: "hypothesis_values"
10491 #     type_attr: "T"
10492 #   }
10493 #   input_arg {
10494 #     name: "hypothesis_shape"
10495 #     type: DT_INT64
10496 #   }
10497 #   input_arg {
10498 #     name: "truth_indices"
10499 #     type: DT_INT64
10500 #   }
10501 #   input_arg {
10502 #     name: "truth_values"
10503 #     type_attr: "T"
10504 #   }
10505 #   input_arg {
10506 #     name: "truth_shape"
10507 #     type: DT_INT64
10508 #   }
10509 #   output_arg {
10510 #     name: "output"
10511 #     type: DT_FLOAT
10512 #   }
10513 #   attr {
10514 #     name: "normalize"
10515 #     type: "bool"
10516 #     default_value {
10517 #       b: true
10518 #     }
10519 #   }
10520 #   attr {
10521 #     name: "T"
10522 #     type: "type"
10523 #   }
10524 # }
10525 # op {
10526 #   name: "Empty"
10527 #   input_arg {
10528 #     name: "shape"
10529 #     type: DT_INT32
10530 #   }
10531 #   output_arg {
10532 #     name: "output"
10533 #     type_attr: "dtype"
10534 #   }
10535 #   attr {
10536 #     name: "dtype"
10537 #     type: "type"
10538 #   }
10539 #   attr {
10540 #     name: "init"
10541 #     type: "bool"
10542 #     default_value {
10543 #       b: false
10544 #     }
10545 #   }
10546 #   is_stateful: true
10547 # }
10548 # op {
10549 #   name: "EnsureShape"
10550 #   input_arg {
10551 #     name: "input"
10552 #     type_attr: "T"
10553 #   }
10554 #   output_arg {
10555 #     name: "output"
10556 #     type_attr: "T"
10557 #   }
10558 #   attr {
10559 #     name: "shape"
10560 #     type: "shape"
10561 #   }
10562 #   attr {
10563 #     name: "T"
10564 #     type: "type"
10565 #   }
10566 # }
10567 # op {
10568 #   name: "ExpandDims"
10569 #   input_arg {
10570 #     name: "input"
10571 #     type_attr: "T"
10572 #   }
10573 #   input_arg {
10574 #     name: "dim"
10575 #     type_attr: "Tdim"
10576 #   }
10577 #   output_arg {
10578 #     name: "output"
10579 #     type_attr: "T"
10580 #   }
10581 #   attr {
10582 #     name: "T"
10583 #     type: "type"
10584 #   }
10585 #   attr {
10586 #     name: "Tdim"
10587 #     type: "type"
10588 #     default_value {
10589 #       type: DT_INT32
10590 #     }
10591 #     allowed_values {
10592 #       list {
10593 #         type: DT_INT32
10594 #         type: DT_INT64
10595 #       }
10596 #     }
10597 #   }
10598 # }
10599 # op {
10600 #   name: "ExtractImagePatches"
10601 #   input_arg {
10602 #     name: "images"
10603 #     type_attr: "T"
10604 #   }
10605 #   output_arg {
10606 #     name: "patches"
10607 #     type_attr: "T"
10608 #   }
10609 #   attr {
10610 #     name: "ksizes"
10611 #     type: "list(int)"
10612 #     has_minimum: true
10613 #     minimum: 4
10614 #   }
10615 #   attr {
10616 #     name: "strides"
10617 #     type: "list(int)"
10618 #     has_minimum: true
10619 #     minimum: 4
10620 #   }
10621 #   attr {
10622 #     name: "rates"
10623 #     type: "list(int)"
10624 #     has_minimum: true
10625 #     minimum: 4
10626 #   }
10627 #   attr {
10628 #     name: "T"
10629 #     type: "type"
10630 #     allowed_values {
10631 #       list {
10632 #         type: DT_FLOAT
10633 #         type: DT_DOUBLE
10634 #         type: DT_INT32
10635 #         type: DT_UINT8
10636 #         type: DT_INT16
10637 #         type: DT_INT8
10638 #         type: DT_INT64
10639 #         type: DT_BFLOAT16
10640 #         type: DT_UINT16
10641 #         type: DT_HALF
10642 #         type: DT_UINT32
10643 #         type: DT_UINT64
10644 #       }
10645 #     }
10646 #   }
10647 #   attr {
10648 #     name: "padding"
10649 #     type: "string"
10650 #     allowed_values {
10651 #       list {
10652 #         s: "SAME"
10653 #         s: "VALID"
10654 #       }
10655 #     }
10656 #   }
10657 # }
10658 # op {
10659 #   name: "ExtractVolumePatches"
10660 #   input_arg {
10661 #     name: "input"
10662 #     type_attr: "T"
10663 #   }
10664 #   output_arg {
10665 #     name: "patches"
10666 #     type_attr: "T"
10667 #   }
10668 #   attr {
10669 #     name: "ksizes"
10670 #     type: "list(int)"
10671 #     has_minimum: true
10672 #     minimum: 5
10673 #   }
10674 #   attr {
10675 #     name: "strides"
10676 #     type: "list(int)"
10677 #     has_minimum: true
10678 #     minimum: 5
10679 #   }
10680 #   attr {
10681 #     name: "T"
10682 #     type: "type"
10683 #     allowed_values {
10684 #       list {
10685 #         type: DT_FLOAT
10686 #         type: DT_DOUBLE
10687 #         type: DT_INT32
10688 #         type: DT_UINT8
10689 #         type: DT_INT16
10690 #         type: DT_INT8
10691 #         type: DT_INT64
10692 #         type: DT_BFLOAT16
10693 #         type: DT_UINT16
10694 #         type: DT_HALF
10695 #         type: DT_UINT32
10696 #         type: DT_UINT64
10697 #       }
10698 #     }
10699 #   }
10700 #   attr {
10701 #     name: "padding"
10702 #     type: "string"
10703 #     allowed_values {
10704 #       list {
10705 #         s: "SAME"
10706 #         s: "VALID"
10707 #       }
10708 #     }
10709 #   }
10710 # }
10711 # op {
10712 #   name: "FakeQuantWithMinMaxArgs"
10713 #   input_arg {
10714 #     name: "inputs"
10715 #     type: DT_FLOAT
10716 #   }
10717 #   output_arg {
10718 #     name: "outputs"
10719 #     type: DT_FLOAT
10720 #   }
10721 #   attr {
10722 #     name: "min"
10723 #     type: "float"
10724 #     default_value {
10725 #       f: -6
10726 #     }
10727 #   }
10728 #   attr {
10729 #     name: "max"
10730 #     type: "float"
10731 #     default_value {
10732 #       f: 6
10733 #     }
10734 #   }
10735 #   attr {
10736 #     name: "num_bits"
10737 #     type: "int"
10738 #     default_value {
10739 #       i: 8
10740 #     }
10741 #   }
10742 #   attr {
10743 #     name: "narrow_range"
10744 #     type: "bool"
10745 #     default_value {
10746 #       b: false
10747 #     }
10748 #   }
10749 # }
10750 # op {
10751 #   name: "FakeQuantWithMinMaxArgsGradient"
10752 #   input_arg {
10753 #     name: "gradients"
10754 #     type: DT_FLOAT
10755 #   }
10756 #   input_arg {
10757 #     name: "inputs"
10758 #     type: DT_FLOAT
10759 #   }
10760 #   output_arg {
10761 #     name: "backprops"
10762 #     type: DT_FLOAT
10763 #   }
10764 #   attr {
10765 #     name: "min"
10766 #     type: "float"
10767 #     default_value {
10768 #       f: -6
10769 #     }
10770 #   }
10771 #   attr {
10772 #     name: "max"
10773 #     type: "float"
10774 #     default_value {
10775 #       f: 6
10776 #     }
10777 #   }
10778 #   attr {
10779 #     name: "num_bits"
10780 #     type: "int"
10781 #     default_value {
10782 #       i: 8
10783 #     }
10784 #   }
10785 #   attr {
10786 #     name: "narrow_range"
10787 #     type: "bool"
10788 #     default_value {
10789 #       b: false
10790 #     }
10791 #   }
10792 # }
10793 # op {
10794 #   name: "FakeQuantWithMinMaxVars"
10795 #   input_arg {
10796 #     name: "inputs"
10797 #     type: DT_FLOAT
10798 #   }
10799 #   input_arg {
10800 #     name: "min"
10801 #     type: DT_FLOAT
10802 #   }
10803 #   input_arg {
10804 #     name: "max"
10805 #     type: DT_FLOAT
10806 #   }
10807 #   output_arg {
10808 #     name: "outputs"
10809 #     type: DT_FLOAT
10810 #   }
10811 #   attr {
10812 #     name: "num_bits"
10813 #     type: "int"
10814 #     default_value {
10815 #       i: 8
10816 #     }
10817 #   }
10818 #   attr {
10819 #     name: "narrow_range"
10820 #     type: "bool"
10821 #     default_value {
10822 #       b: false
10823 #     }
10824 #   }
10825 # }
10826 # op {
10827 #   name: "FakeQuantWithMinMaxVarsGradient"
10828 #   input_arg {
10829 #     name: "gradients"
10830 #     type: DT_FLOAT
10831 #   }
10832 #   input_arg {
10833 #     name: "inputs"
10834 #     type: DT_FLOAT
10835 #   }
10836 #   input_arg {
10837 #     name: "min"
10838 #     type: DT_FLOAT
10839 #   }
10840 #   input_arg {
10841 #     name: "max"
10842 #     type: DT_FLOAT
10843 #   }
10844 #   output_arg {
10845 #     name: "backprops_wrt_input"
10846 #     type: DT_FLOAT
10847 #   }
10848 #   output_arg {
10849 #     name: "backprop_wrt_min"
10850 #     type: DT_FLOAT
10851 #   }
10852 #   output_arg {
10853 #     name: "backprop_wrt_max"
10854 #     type: DT_FLOAT
10855 #   }
10856 #   attr {
10857 #     name: "num_bits"
10858 #     type: "int"
10859 #     default_value {
10860 #       i: 8
10861 #     }
10862 #   }
10863 #   attr {
10864 #     name: "narrow_range"
10865 #     type: "bool"
10866 #     default_value {
10867 #       b: false
10868 #     }
10869 #   }
10870 # }
10871 # op {
10872 #   name: "FakeQuantWithMinMaxVarsPerChannel"
10873 #   input_arg {
10874 #     name: "inputs"
10875 #     type: DT_FLOAT
10876 #   }
10877 #   input_arg {
10878 #     name: "min"
10879 #     type: DT_FLOAT
10880 #   }
10881 #   input_arg {
10882 #     name: "max"
10883 #     type: DT_FLOAT
10884 #   }
10885 #   output_arg {
10886 #     name: "outputs"
10887 #     type: DT_FLOAT
10888 #   }
10889 #   attr {
10890 #     name: "num_bits"
10891 #     type: "int"
10892 #     default_value {
10893 #       i: 8
10894 #     }
10895 #   }
10896 #   attr {
10897 #     name: "narrow_range"
10898 #     type: "bool"
10899 #     default_value {
10900 #       b: false
10901 #     }
10902 #   }
10903 # }
10904 # op {
10905 #   name: "FakeQuantWithMinMaxVarsPerChannelGradient"
10906 #   input_arg {
10907 #     name: "gradients"
10908 #     type: DT_FLOAT
10909 #   }
10910 #   input_arg {
10911 #     name: "inputs"
10912 #     type: DT_FLOAT
10913 #   }
10914 #   input_arg {
10915 #     name: "min"
10916 #     type: DT_FLOAT
10917 #   }
10918 #   input_arg {
10919 #     name: "max"
10920 #     type: DT_FLOAT
10921 #   }
10922 #   output_arg {
10923 #     name: "backprops_wrt_input"
10924 #     type: DT_FLOAT
10925 #   }
10926 #   output_arg {
10927 #     name: "backprop_wrt_min"
10928 #     type: DT_FLOAT
10929 #   }
10930 #   output_arg {
10931 #     name: "backprop_wrt_max"
10932 #     type: DT_FLOAT
10933 #   }
10934 #   attr {
10935 #     name: "num_bits"
10936 #     type: "int"
10937 #     default_value {
10938 #       i: 8
10939 #     }
10940 #   }
10941 #   attr {
10942 #     name: "narrow_range"
10943 #     type: "bool"
10944 #     default_value {
10945 #       b: false
10946 #     }
10947 #   }
10948 # }
10949 # op {
10950 #   name: "Fill"
10951 #   input_arg {
10952 #     name: "dims"
10953 #     type_attr: "index_type"
10954 #   }
10955 #   input_arg {
10956 #     name: "value"
10957 #     type_attr: "T"
10958 #   }
10959 #   output_arg {
10960 #     name: "output"
10961 #     type_attr: "T"
10962 #   }
10963 #   attr {
10964 #     name: "T"
10965 #     type: "type"
10966 #   }
10967 #   attr {
10968 #     name: "index_type"
10969 #     type: "type"
10970 #     default_value {
10971 #       type: DT_INT32
10972 #     }
10973 #     allowed_values {
10974 #       list {
10975 #         type: DT_INT32
10976 #         type: DT_INT64
10977 #       }
10978 #     }
10979 #   }
10980 # }
10981 # op {
10982 #   name: "Gather"
10983 #   input_arg {
10984 #     name: "params"
10985 #     type_attr: "Tparams"
10986 #   }
10987 #   input_arg {
10988 #     name: "indices"
10989 #     type_attr: "Tindices"
10990 #   }
10991 #   output_arg {
10992 #     name: "output"
10993 #     type_attr: "Tparams"
10994 #   }
10995 #   attr {
10996 #     name: "validate_indices"
10997 #     type: "bool"
10998 #     default_value {
10999 #       b: true
11000 #     }
11001 #   }
11002 #   attr {
11003 #     name: "Tparams"
11004 #     type: "type"
11005 #   }
11006 #   attr {
11007 #     name: "Tindices"
11008 #     type: "type"
11009 #     allowed_values {
11010 #       list {
11011 #         type: DT_INT32
11012 #         type: DT_INT64
11013 #       }
11014 #     }
11015 #   }
11016 # }
11017 # op {
11018 #   name: "GatherNd"
11019 #   input_arg {
11020 #     name: "params"
11021 #     type_attr: "Tparams"
11022 #   }
11023 #   input_arg {
11024 #     name: "indices"
11025 #     type_attr: "Tindices"
11026 #   }
11027 #   output_arg {
11028 #     name: "output"
11029 #     type_attr: "Tparams"
11030 #   }
11031 #   attr {
11032 #     name: "Tparams"
11033 #     type: "type"
11034 #   }
11035 #   attr {
11036 #     name: "Tindices"
11037 #     type: "type"
11038 #     allowed_values {
11039 #       list {
11040 #         type: DT_INT32
11041 #         type: DT_INT64
11042 #       }
11043 #     }
11044 #   }
11045 # }
11046 # op {
11047 #   name: "GatherV2"
11048 #   input_arg {
11049 #     name: "params"
11050 #     type_attr: "Tparams"
11051 #   }
11052 #   input_arg {
11053 #     name: "indices"
11054 #     type_attr: "Tindices"
11055 #   }
11056 #   input_arg {
11057 #     name: "axis"
11058 #     type_attr: "Taxis"
11059 #   }
11060 #   output_arg {
11061 #     name: "output"
11062 #     type_attr: "Tparams"
11063 #   }
11064 #   attr {
11065 #     name: "Tparams"
11066 #     type: "type"
11067 #   }
11068 #   attr {
11069 #     name: "Tindices"
11070 #     type: "type"
11071 #     allowed_values {
11072 #       list {
11073 #         type: DT_INT32
11074 #         type: DT_INT64
11075 #       }
11076 #     }
11077 #   }
11078 #   attr {
11079 #     name: "Taxis"
11080 #     type: "type"
11081 #     allowed_values {
11082 #       list {
11083 #         type: DT_INT32
11084 #         type: DT_INT64
11085 #       }
11086 #     }
11087 #   }
11088 # }
11089 # op {
11090 #   name: "GuaranteeConst"
11091 #   input_arg {
11092 #     name: "input"
11093 #     type_attr: "T"
11094 #   }
11095 #   output_arg {
11096 #     name: "output"
11097 #     type_attr: "T"
11098 #   }
11099 #   attr {
11100 #     name: "T"
11101 #     type: "type"
11102 #   }
11103 #   is_stateful: true
11104 # }
11105 # op {
11106 #   name: "Identity"
11107 #   input_arg {
11108 #     name: "input"
11109 #     type_attr: "T"
11110 #   }
11111 #   output_arg {
11112 #     name: "output"
11113 #     type_attr: "T"
11114 #   }
11115 #   attr {
11116 #     name: "T"
11117 #     type: "type"
11118 #   }
11119 # }
11120 # op {
11121 #   name: "IdentityN"
11122 #   input_arg {
11123 #     name: "input"
11124 #     type_list_attr: "T"
11125 #   }
11126 #   output_arg {
11127 #     name: "output"
11128 #     type_list_attr: "T"
11129 #   }
11130 #   attr {
11131 #     name: "T"
11132 #     type: "list(type)"
11133 #     has_minimum: true
11134 #     minimum: 1
11135 #   }
11136 # }
11137 # op {
11138 #   name: "ImmutableConst"
11139 #   output_arg {
11140 #     name: "tensor"
11141 #     type_attr: "dtype"
11142 #   }
11143 #   attr {
11144 #     name: "dtype"
11145 #     type: "type"
11146 #   }
11147 #   attr {
11148 #     name: "shape"
11149 #     type: "shape"
11150 #   }
11151 #   attr {
11152 #     name: "memory_region_name"
11153 #     type: "string"
11154 #   }
11155 # }
11156 # op {
11157 #   name: "InplaceAdd"
11158 #   input_arg {
11159 #     name: "x"
11160 #     type_attr: "T"
11161 #   }
11162 #   input_arg {
11163 #     name: "i"
11164 #     type: DT_INT32
11165 #   }
11166 #   input_arg {
11167 #     name: "v"
11168 #     type_attr: "T"
11169 #   }
11170 #   output_arg {
11171 #     name: "y"
11172 #     type_attr: "T"
11173 #   }
11174 #   attr {
11175 #     name: "T"
11176 #     type: "type"
11177 #   }
11178 # }
11179 # op {
11180 #   name: "InplaceSub"
11181 #   input_arg {
11182 #     name: "x"
11183 #     type_attr: "T"
11184 #   }
11185 #   input_arg {
11186 #     name: "i"
11187 #     type: DT_INT32
11188 #   }
11189 #   input_arg {
11190 #     name: "v"
11191 #     type_attr: "T"
11192 #   }
11193 #   output_arg {
11194 #     name: "y"
11195 #     type_attr: "T"
11196 #   }
11197 #   attr {
11198 #     name: "T"
11199 #     type: "type"
11200 #   }
11201 # }
11202 # op {
11203 #   name: "InplaceUpdate"
11204 #   input_arg {
11205 #     name: "x"
11206 #     type_attr: "T"
11207 #   }
11208 #   input_arg {
11209 #     name: "i"
11210 #     type: DT_INT32
11211 #   }
11212 #   input_arg {
11213 #     name: "v"
11214 #     type_attr: "T"
11215 #   }
11216 #   output_arg {
11217 #     name: "y"
11218 #     type_attr: "T"
11219 #   }
11220 #   attr {
11221 #     name: "T"
11222 #     type: "type"
11223 #   }
11224 # }
11225 # op {
11226 #   name: "InvertPermutation"
11227 #   input_arg {
11228 #     name: "x"
11229 #     type_attr: "T"
11230 #   }
11231 #   output_arg {
11232 #     name: "y"
11233 #     type_attr: "T"
11234 #   }
11235 #   attr {
11236 #     name: "T"
11237 #     type: "type"
11238 #     default_value {
11239 #       type: DT_INT32
11240 #     }
11241 #     allowed_values {
11242 #       list {
11243 #         type: DT_INT32
11244 #         type: DT_INT64
11245 #       }
11246 #     }
11247 #   }
11248 # }
11249 # op {
11250 #   name: "ListDiff"
11251 #   input_arg {
11252 #     name: "x"
11253 #     type_attr: "T"
11254 #   }
11255 #   input_arg {
11256 #     name: "y"
11257 #     type_attr: "T"
11258 #   }
11259 #   output_arg {
11260 #     name: "out"
11261 #     type_attr: "T"
11262 #   }
11263 #   output_arg {
11264 #     name: "idx"
11265 #     type_attr: "out_idx"
11266 #   }
11267 #   attr {
11268 #     name: "T"
11269 #     type: "type"
11270 #   }
11271 #   attr {
11272 #     name: "out_idx"
11273 #     type: "type"
11274 #     default_value {
11275 #       type: DT_INT32
11276 #     }
11277 #     allowed_values {
11278 #       list {
11279 #         type: DT_INT32
11280 #         type: DT_INT64
11281 #       }
11282 #     }
11283 #   }
11284 # }
11285 # op {
11286 #   name: "LowerBound"
11287 #   input_arg {
11288 #     name: "sorted_inputs"
11289 #     type_attr: "T"
11290 #   }
11291 #   input_arg {
11292 #     name: "values"
11293 #     type_attr: "T"
11294 #   }
11295 #   output_arg {
11296 #     name: "output"
11297 #     type_attr: "out_type"
11298 #   }
11299 #   attr {
11300 #     name: "T"
11301 #     type: "type"
11302 #   }
11303 #   attr {
11304 #     name: "out_type"
11305 #     type: "type"
11306 #     default_value {
11307 #       type: DT_INT32
11308 #     }
11309 #     allowed_values {
11310 #       list {
11311 #         type: DT_INT32
11312 #         type: DT_INT64
11313 #       }
11314 #     }
11315 #   }
11316 # }
11317 # op {
11318 #   name: "MatrixBandPart"
11319 #   input_arg {
11320 #     name: "input"
11321 #     type_attr: "T"
11322 #   }
11323 #   input_arg {
11324 #     name: "num_lower"
11325 #     type_attr: "Tindex"
11326 #   }
11327 #   input_arg {
11328 #     name: "num_upper"
11329 #     type_attr: "Tindex"
11330 #   }
11331 #   output_arg {
11332 #     name: "band"
11333 #     type_attr: "T"
11334 #   }
11335 #   attr {
11336 #     name: "T"
11337 #     type: "type"
11338 #   }
11339 #   attr {
11340 #     name: "Tindex"
11341 #     type: "type"
11342 #     default_value {
11343 #       type: DT_INT64
11344 #     }
11345 #     allowed_values {
11346 #       list {
11347 #         type: DT_INT32
11348 #         type: DT_INT64
11349 #       }
11350 #     }
11351 #   }
11352 # }
11353 # op {
11354 #   name: "MatrixDiag"
11355 #   input_arg {
11356 #     name: "diagonal"
11357 #     type_attr: "T"
11358 #   }
11359 #   output_arg {
11360 #     name: "output"
11361 #     type_attr: "T"
11362 #   }
11363 #   attr {
11364 #     name: "T"
11365 #     type: "type"
11366 #   }
11367 # }
11368 # op {
11369 #   name: "MatrixDiagPart"
11370 #   input_arg {
11371 #     name: "input"
11372 #     type_attr: "T"
11373 #   }
11374 #   output_arg {
11375 #     name: "diagonal"
11376 #     type_attr: "T"
11377 #   }
11378 #   attr {
11379 #     name: "T"
11380 #     type: "type"
11381 #   }
11382 # }
11383 # op {
11384 #   name: "MatrixSetDiag"
11385 #   input_arg {
11386 #     name: "input"
11387 #     type_attr: "T"
11388 #   }
11389 #   input_arg {
11390 #     name: "diagonal"
11391 #     type_attr: "T"
11392 #   }
11393 #   output_arg {
11394 #     name: "output"
11395 #     type_attr: "T"
11396 #   }
11397 #   attr {
11398 #     name: "T"
11399 #     type: "type"
11400 #   }
11401 # }
11402 # op {
11403 #   name: "MirrorPad"
11404 #   input_arg {
11405 #     name: "input"
11406 #     type_attr: "T"
11407 #   }
11408 #   input_arg {
11409 #     name: "paddings"
11410 #     type_attr: "Tpaddings"
11411 #   }
11412 #   output_arg {
11413 #     name: "output"
11414 #     type_attr: "T"
11415 #   }
11416 #   attr {
11417 #     name: "T"
11418 #     type: "type"
11419 #   }
11420 #   attr {
11421 #     name: "Tpaddings"
11422 #     type: "type"
11423 #     default_value {
11424 #       type: DT_INT32
11425 #     }
11426 #     allowed_values {
11427 #       list {
11428 #         type: DT_INT32
11429 #         type: DT_INT64
11430 #       }
11431 #     }
11432 #   }
11433 #   attr {
11434 #     name: "mode"
11435 #     type: "string"
11436 #     allowed_values {
11437 #       list {
11438 #         s: "REFLECT"
11439 #         s: "SYMMETRIC"
11440 #       }
11441 #     }
11442 #   }
11443 # }
11444 # op {
11445 #   name: "MirrorPadGrad"
11446 #   input_arg {
11447 #     name: "input"
11448 #     type_attr: "T"
11449 #   }
11450 #   input_arg {
11451 #     name: "paddings"
11452 #     type_attr: "Tpaddings"
11453 #   }
11454 #   output_arg {
11455 #     name: "output"
11456 #     type_attr: "T"
11457 #   }
11458 #   attr {
11459 #     name: "T"
11460 #     type: "type"
11461 #   }
11462 #   attr {
11463 #     name: "Tpaddings"
11464 #     type: "type"
11465 #     default_value {
11466 #       type: DT_INT32
11467 #     }
11468 #     allowed_values {
11469 #       list {
11470 #         type: DT_INT32
11471 #         type: DT_INT64
11472 #       }
11473 #     }
11474 #   }
11475 #   attr {
11476 #     name: "mode"
11477 #     type: "string"
11478 #     allowed_values {
11479 #       list {
11480 #         s: "REFLECT"
11481 #         s: "SYMMETRIC"
11482 #       }
11483 #     }
11484 #   }
11485 # }
11486 # op {
11487 #   name: "OneHot"
11488 #   input_arg {
11489 #     name: "indices"
11490 #     type_attr: "TI"
11491 #   }
11492 #   input_arg {
11493 #     name: "depth"
11494 #     type: DT_INT32
11495 #   }
11496 #   input_arg {
11497 #     name: "on_value"
11498 #     type_attr: "T"
11499 #   }
11500 #   input_arg {
11501 #     name: "off_value"
11502 #     type_attr: "T"
11503 #   }
11504 #   output_arg {
11505 #     name: "output"
11506 #     type_attr: "T"
11507 #   }
11508 #   attr {
11509 #     name: "axis"
11510 #     type: "int"
11511 #     default_value {
11512 #       i: -1
11513 #     }
11514 #   }
11515 #   attr {
11516 #     name: "T"
11517 #     type: "type"
11518 #   }
11519 #   attr {
11520 #     name: "TI"
11521 #     type: "type"
11522 #     default_value {
11523 #       type: DT_INT64
11524 #     }
11525 #     allowed_values {
11526 #       list {
11527 #         type: DT_UINT8
11528 #         type: DT_INT32
11529 #         type: DT_INT64
11530 #       }
11531 #     }
11532 #   }
11533 # }
11534 # op {
11535 #   name: "OnesLike"
11536 #   input_arg {
11537 #     name: "x"
11538 #     type_attr: "T"
11539 #   }
11540 #   output_arg {
11541 #     name: "y"
11542 #     type_attr: "T"
11543 #   }
11544 #   attr {
11545 #     name: "T"
11546 #     type: "type"
11547 #     allowed_values {
11548 #       list {
11549 #         type: DT_BFLOAT16
11550 #         type: DT_HALF
11551 #         type: DT_FLOAT
11552 #         type: DT_DOUBLE
11553 #         type: DT_INT8
11554 #         type: DT_UINT8
11555 #         type: DT_INT16
11556 #         type: DT_UINT16
11557 #         type: DT_INT32
11558 #         type: DT_INT64
11559 #         type: DT_COMPLEX64
11560 #         type: DT_COMPLEX128
11561 #         type: DT_BOOL
11562 #       }
11563 #     }
11564 #   }
11565 # }
11566 # op {
11567 #   name: "Pack"
11568 #   input_arg {
11569 #     name: "values"
11570 #     type_attr: "T"
11571 #     number_attr: "N"
11572 #   }
11573 #   output_arg {
11574 #     name: "output"
11575 #     type_attr: "T"
11576 #   }
11577 #   attr {
11578 #     name: "N"
11579 #     type: "int"
11580 #     has_minimum: true
11581 #     minimum: 1
11582 #   }
11583 #   attr {
11584 #     name: "T"
11585 #     type: "type"
11586 #   }
11587 #   attr {
11588 #     name: "axis"
11589 #     type: "int"
11590 #     default_value {
11591 #       i: 0
11592 #     }
11593 #   }
11594 # }
11595 # op {
11596 #   name: "Pad"
11597 #   input_arg {
11598 #     name: "input"
11599 #     type_attr: "T"
11600 #   }
11601 #   input_arg {
11602 #     name: "paddings"
11603 #     type_attr: "Tpaddings"
11604 #   }
11605 #   output_arg {
11606 #     name: "output"
11607 #     type_attr: "T"
11608 #   }
11609 #   attr {
11610 #     name: "T"
11611 #     type: "type"
11612 #   }
11613 #   attr {
11614 #     name: "Tpaddings"
11615 #     type: "type"
11616 #     default_value {
11617 #       type: DT_INT32
11618 #     }
11619 #     allowed_values {
11620 #       list {
11621 #         type: DT_INT32
11622 #         type: DT_INT64
11623 #       }
11624 #     }
11625 #   }
11626 # }
11627 # op {
11628 #   name: "PadV2"
11629 #   input_arg {
11630 #     name: "input"
11631 #     type_attr: "T"
11632 #   }
11633 #   input_arg {
11634 #     name: "paddings"
11635 #     type_attr: "Tpaddings"
11636 #   }
11637 #   input_arg {
11638 #     name: "constant_values"
11639 #     type_attr: "T"
11640 #   }
11641 #   output_arg {
11642 #     name: "output"
11643 #     type_attr: "T"
11644 #   }
11645 #   attr {
11646 #     name: "T"
11647 #     type: "type"
11648 #   }
11649 #   attr {
11650 #     name: "Tpaddings"
11651 #     type: "type"
11652 #     default_value {
11653 #       type: DT_INT32
11654 #     }
11655 #     allowed_values {
11656 #       list {
11657 #         type: DT_INT32
11658 #         type: DT_INT64
11659 #       }
11660 #     }
11661 #   }
11662 # }
11663 # op {
11664 #   name: "ParallelConcat"
11665 #   input_arg {
11666 #     name: "values"
11667 #     type_attr: "T"
11668 #     number_attr: "N"
11669 #   }
11670 #   output_arg {
11671 #     name: "output"
11672 #     type_attr: "T"
11673 #   }
11674 #   attr {
11675 #     name: "N"
11676 #     type: "int"
11677 #     has_minimum: true
11678 #     minimum: 1
11679 #   }
11680 #   attr {
11681 #     name: "T"
11682 #     type: "type"
11683 #   }
11684 #   attr {
11685 #     name: "shape"
11686 #     type: "shape"
11687 #   }
11688 # }
11689 # op {
11690 #   name: "Placeholder"
11691 #   output_arg {
11692 #     name: "output"
11693 #     type_attr: "dtype"
11694 #   }
11695 #   attr {
11696 #     name: "dtype"
11697 #     type: "type"
11698 #   }
11699 #   attr {
11700 #     name: "shape"
11701 #     type: "shape"
11702 #     default_value {
11703 #       shape {
11704 #         unknown_rank: true
11705 #       }
11706 #     }
11707 #   }
11708 # }
11709 # op {
11710 #   name: "PlaceholderV2"
11711 #   output_arg {
11712 #     name: "output"
11713 #     type_attr: "dtype"
11714 #   }
11715 #   attr {
11716 #     name: "dtype"
11717 #     type: "type"
11718 #   }
11719 #   attr {
11720 #     name: "shape"
11721 #     type: "shape"
11722 #   }
11723 #   deprecation {
11724 #     version: 23
11725 #     explanation: "Placeholder now behaves the same as PlaceholderV2."
11726 #   }
11727 # }
11728 # op {
11729 #   name: "PlaceholderWithDefault"
11730 #   input_arg {
11731 #     name: "input"
11732 #     type_attr: "dtype"
11733 #   }
11734 #   output_arg {
11735 #     name: "output"
11736 #     type_attr: "dtype"
11737 #   }
11738 #   attr {
11739 #     name: "dtype"
11740 #     type: "type"
11741 #   }
11742 #   attr {
11743 #     name: "shape"
11744 #     type: "shape"
11745 #   }
11746 # }
11747 # op {
11748 #   name: "PreventGradient"
11749 #   input_arg {
11750 #     name: "input"
11751 #     type_attr: "T"
11752 #   }
11753 #   output_arg {
11754 #     name: "output"
11755 #     type_attr: "T"
11756 #   }
11757 #   attr {
11758 #     name: "T"
11759 #     type: "type"
11760 #   }
11761 #   attr {
11762 #     name: "message"
11763 #     type: "string"
11764 #     default_value {
11765 #       s: ""
11766 #     }
11767 #   }
11768 # }
11769 # op {
11770 #   name: "QuantizeAndDequantize"
11771 #   input_arg {
11772 #     name: "input"
11773 #     type_attr: "T"
11774 #   }
11775 #   output_arg {
11776 #     name: "output"
11777 #     type_attr: "T"
11778 #   }
11779 #   attr {
11780 #     name: "signed_input"
11781 #     type: "bool"
11782 #     default_value {
11783 #       b: true
11784 #     }
11785 #   }
11786 #   attr {
11787 #     name: "num_bits"
11788 #     type: "int"
11789 #     default_value {
11790 #       i: 8
11791 #     }
11792 #   }
11793 #   attr {
11794 #     name: "range_given"
11795 #     type: "bool"
11796 #     default_value {
11797 #       b: false
11798 #     }
11799 #   }
11800 #   attr {
11801 #     name: "input_min"
11802 #     type: "float"
11803 #     default_value {
11804 #       f: 0
11805 #     }
11806 #   }
11807 #   attr {
11808 #     name: "input_max"
11809 #     type: "float"
11810 #     default_value {
11811 #       f: 0
11812 #     }
11813 #   }
11814 #   attr {
11815 #     name: "T"
11816 #     type: "type"
11817 #     allowed_values {
11818 #       list {
11819 #         type: DT_BFLOAT16
11820 #         type: DT_HALF
11821 #         type: DT_FLOAT
11822 #         type: DT_DOUBLE
11823 #       }
11824 #     }
11825 #   }
11826 #   deprecation {
11827 #     version: 22
11828 #     explanation: "Replaced by QuantizeAndDequantizeV2"
11829 #   }
11830 # }
11831 # op {
11832 #   name: "QuantizeAndDequantizeV2"
11833 #   input_arg {
11834 #     name: "input"
11835 #     type_attr: "T"
11836 #   }
11837 #   input_arg {
11838 #     name: "input_min"
11839 #     type_attr: "T"
11840 #   }
11841 #   input_arg {
11842 #     name: "input_max"
11843 #     type_attr: "T"
11844 #   }
11845 #   output_arg {
11846 #     name: "output"
11847 #     type_attr: "T"
11848 #   }
11849 #   attr {
11850 #     name: "signed_input"
11851 #     type: "bool"
11852 #     default_value {
11853 #       b: true
11854 #     }
11855 #   }
11856 #   attr {
11857 #     name: "num_bits"
11858 #     type: "int"
11859 #     default_value {
11860 #       i: 8
11861 #     }
11862 #   }
11863 #   attr {
11864 #     name: "range_given"
11865 #     type: "bool"
11866 #     default_value {
11867 #       b: false
11868 #     }
11869 #   }
11870 #   attr {
11871 #     name: "T"
11872 #     type: "type"
11873 #     allowed_values {
11874 #       list {
11875 #         type: DT_BFLOAT16
11876 #         type: DT_HALF
11877 #         type: DT_FLOAT
11878 #         type: DT_DOUBLE
11879 #       }
11880 #     }
11881 #   }
11882 # }
11883 # op {
11884 #   name: "QuantizeAndDequantizeV3"
11885 #   input_arg {
11886 #     name: "input"
11887 #     type_attr: "T"
11888 #   }
11889 #   input_arg {
11890 #     name: "input_min"
11891 #     type_attr: "T"
11892 #   }
11893 #   input_arg {
11894 #     name: "input_max"
11895 #     type_attr: "T"
11896 #   }
11897 #   input_arg {
11898 #     name: "num_bits"
11899 #     type: DT_INT32
11900 #   }
11901 #   output_arg {
11902 #     name: "output"
11903 #     type_attr: "T"
11904 #   }
11905 #   attr {
11906 #     name: "signed_input"
11907 #     type: "bool"
11908 #     default_value {
11909 #       b: true
11910 #     }
11911 #   }
11912 #   attr {
11913 #     name: "range_given"
11914 #     type: "bool"
11915 #     default_value {
11916 #       b: true
11917 #     }
11918 #   }
11919 #   attr {
11920 #     name: "T"
11921 #     type: "type"
11922 #     allowed_values {
11923 #       list {
11924 #         type: DT_BFLOAT16
11925 #         type: DT_HALF
11926 #         type: DT_FLOAT
11927 #         type: DT_DOUBLE
11928 #       }
11929 #     }
11930 #   }
11931 # }
11932 # op {
11933 #   name: "QuantizeV2"
11934 #   input_arg {
11935 #     name: "input"
11936 #     type: DT_FLOAT
11937 #   }
11938 #   input_arg {
11939 #     name: "min_range"
11940 #     type: DT_FLOAT
11941 #   }
11942 #   input_arg {
11943 #     name: "max_range"
11944 #     type: DT_FLOAT
11945 #   }
11946 #   output_arg {
11947 #     name: "output"
11948 #     type_attr: "T"
11949 #   }
11950 #   output_arg {
11951 #     name: "output_min"
11952 #     type: DT_FLOAT
11953 #   }
11954 #   output_arg {
11955 #     name: "output_max"
11956 #     type: DT_FLOAT
11957 #   }
11958 #   attr {
11959 #     name: "T"
11960 #     type: "type"
11961 #     allowed_values {
11962 #       list {
11963 #         type: DT_QINT8
11964 #         type: DT_QUINT8
11965 #         type: DT_QINT32
11966 #         type: DT_QINT16
11967 #         type: DT_QUINT16
11968 #       }
11969 #     }
11970 #   }
11971 #   attr {
11972 #     name: "mode"
11973 #     type: "string"
11974 #     default_value {
11975 #       s: "MIN_COMBINED"
11976 #     }
11977 #     allowed_values {
11978 #       list {
11979 #         s: "MIN_COMBINED"
11980 #         s: "MIN_FIRST"
11981 #         s: "SCALED"
11982 #       }
11983 #     }
11984 #   }
11985 #   attr {
11986 #     name: "round_mode"
11987 #     type: "string"
11988 #     default_value {
11989 #       s: "HALF_AWAY_FROM_ZERO"
11990 #     }
11991 #     allowed_values {
11992 #       list {
11993 #         s: "HALF_AWAY_FROM_ZERO"
11994 #         s: "HALF_TO_EVEN"
11995 #       }
11996 #     }
11997 #   }
11998 # }
11999 # op {
12000 #   name: "QuantizedConcat"
12001 #   input_arg {
12002 #     name: "concat_dim"
12003 #     type: DT_INT32
12004 #   }
12005 #   input_arg {
12006 #     name: "values"
12007 #     type_attr: "T"
12008 #     number_attr: "N"
12009 #   }
12010 #   input_arg {
12011 #     name: "input_mins"
12012 #     type: DT_FLOAT
12013 #     number_attr: "N"
12014 #   }
12015 #   input_arg {
12016 #     name: "input_maxes"
12017 #     type: DT_FLOAT
12018 #     number_attr: "N"
12019 #   }
12020 #   output_arg {
12021 #     name: "output"
12022 #     type_attr: "T"
12023 #   }
12024 #   output_arg {
12025 #     name: "output_min"
12026 #     type: DT_FLOAT
12027 #   }
12028 #   output_arg {
12029 #     name: "output_max"
12030 #     type: DT_FLOAT
12031 #   }
12032 #   attr {
12033 #     name: "N"
12034 #     type: "int"
12035 #     has_minimum: true
12036 #     minimum: 2
12037 #   }
12038 #   attr {
12039 #     name: "T"
12040 #     type: "type"
12041 #   }
12042 # }
12043 # op {
12044 #   name: "QuantizedInstanceNorm"
12045 #   input_arg {
12046 #     name: "x"
12047 #     type_attr: "T"
12048 #   }
12049 #   input_arg {
12050 #     name: "x_min"
12051 #     type: DT_FLOAT
12052 #   }
12053 #   input_arg {
12054 #     name: "x_max"
12055 #     type: DT_FLOAT
12056 #   }
12057 #   output_arg {
12058 #     name: "y"
12059 #     type_attr: "T"
12060 #   }
12061 #   output_arg {
12062 #     name: "y_min"
12063 #     type: DT_FLOAT
12064 #   }
12065 #   output_arg {
12066 #     name: "y_max"
12067 #     type: DT_FLOAT
12068 #   }
12069 #   attr {
12070 #     name: "T"
12071 #     type: "type"
12072 #     allowed_values {
12073 #       list {
12074 #         type: DT_QINT8
12075 #         type: DT_QUINT8
12076 #         type: DT_QINT32
12077 #         type: DT_QINT16
12078 #         type: DT_QUINT16
12079 #       }
12080 #     }
12081 #   }
12082 #   attr {
12083 #     name: "output_range_given"
12084 #     type: "bool"
12085 #     default_value {
12086 #       b: false
12087 #     }
12088 #   }
12089 #   attr {
12090 #     name: "given_y_min"
12091 #     type: "float"
12092 #     default_value {
12093 #       f: 0
12094 #     }
12095 #   }
12096 #   attr {
12097 #     name: "given_y_max"
12098 #     type: "float"
12099 #     default_value {
12100 #       f: 0
12101 #     }
12102 #   }
12103 #   attr {
12104 #     name: "variance_epsilon"
12105 #     type: "float"
12106 #     default_value {
12107 #       f: 1e-05
12108 #     }
12109 #   }
12110 #   attr {
12111 #     name: "min_separation"
12112 #     type: "float"
12113 #     default_value {
12114 #       f: 0.001
12115 #     }
12116 #   }
12117 # }
12118 # op {
12119 #   name: "QuantizedReshape"
12120 #   input_arg {
12121 #     name: "tensor"
12122 #     type_attr: "T"
12123 #   }
12124 #   input_arg {
12125 #     name: "shape"
12126 #     type_attr: "Tshape"
12127 #   }
12128 #   input_arg {
12129 #     name: "input_min"
12130 #     type: DT_FLOAT
12131 #   }
12132 #   input_arg {
12133 #     name: "input_max"
12134 #     type: DT_FLOAT
12135 #   }
12136 #   output_arg {
12137 #     name: "output"
12138 #     type_attr: "T"
12139 #   }
12140 #   output_arg {
12141 #     name: "output_min"
12142 #     type: DT_FLOAT
12143 #   }
12144 #   output_arg {
12145 #     name: "output_max"
12146 #     type: DT_FLOAT
12147 #   }
12148 #   attr {
12149 #     name: "T"
12150 #     type: "type"
12151 #   }
12152 #   attr {
12153 #     name: "Tshape"
12154 #     type: "type"
12155 #     default_value {
12156 #       type: DT_INT32
12157 #     }
12158 #     allowed_values {
12159 #       list {
12160 #         type: DT_INT32
12161 #         type: DT_INT64
12162 #       }
12163 #     }
12164 #   }
12165 # }
12166 # op {
12167 #   name: "Rank"
12168 #   input_arg {
12169 #     name: "input"
12170 #     type_attr: "T"
12171 #   }
12172 #   output_arg {
12173 #     name: "output"
12174 #     type: DT_INT32
12175 #   }
12176 #   attr {
12177 #     name: "T"
12178 #     type: "type"
12179 #   }
12180 # }
12181 # op {
12182 #   name: "RefIdentity"
12183 #   input_arg {
12184 #     name: "input"
12185 #     type_attr: "T"
12186 #     is_ref: true
12187 #   }
12188 #   output_arg {
12189 #     name: "output"
12190 #     type_attr: "T"
12191 #     is_ref: true
12192 #   }
12193 #   attr {
12194 #     name: "T"
12195 #     type: "type"
12196 #   }
12197 #   allows_uninitialized_input: true
12198 # }
12199 # op {
12200 #   name: "Reshape"
12201 #   input_arg {
12202 #     name: "tensor"
12203 #     type_attr: "T"
12204 #   }
12205 #   input_arg {
12206 #     name: "shape"
12207 #     type_attr: "Tshape"
12208 #   }
12209 #   output_arg {
12210 #     name: "output"
12211 #     type_attr: "T"
12212 #   }
12213 #   attr {
12214 #     name: "T"
12215 #     type: "type"
12216 #   }
12217 #   attr {
12218 #     name: "Tshape"
12219 #     type: "type"
12220 #     default_value {
12221 #       type: DT_INT32
12222 #     }
12223 #     allowed_values {
12224 #       list {
12225 #         type: DT_INT32
12226 #         type: DT_INT64
12227 #       }
12228 #     }
12229 #   }
12230 # }
12231 # op {
12232 #   name: "ResourceStridedSliceAssign"
12233 #   input_arg {
12234 #     name: "ref"
12235 #     type: DT_RESOURCE
12236 #   }
12237 #   input_arg {
12238 #     name: "begin"
12239 #     type_attr: "Index"
12240 #   }
12241 #   input_arg {
12242 #     name: "end"
12243 #     type_attr: "Index"
12244 #   }
12245 #   input_arg {
12246 #     name: "strides"
12247 #     type_attr: "Index"
12248 #   }
12249 #   input_arg {
12250 #     name: "value"
12251 #     type_attr: "T"
12252 #   }
12253 #   attr {
12254 #     name: "T"
12255 #     type: "type"
12256 #   }
12257 #   attr {
12258 #     name: "Index"
12259 #     type: "type"
12260 #     allowed_values {
12261 #       list {
12262 #         type: DT_INT32
12263 #         type: DT_INT64
12264 #       }
12265 #     }
12266 #   }
12267 #   attr {
12268 #     name: "begin_mask"
12269 #     type: "int"
12270 #     default_value {
12271 #       i: 0
12272 #     }
12273 #   }
12274 #   attr {
12275 #     name: "end_mask"
12276 #     type: "int"
12277 #     default_value {
12278 #       i: 0
12279 #     }
12280 #   }
12281 #   attr {
12282 #     name: "ellipsis_mask"
12283 #     type: "int"
12284 #     default_value {
12285 #       i: 0
12286 #     }
12287 #   }
12288 #   attr {
12289 #     name: "new_axis_mask"
12290 #     type: "int"
12291 #     default_value {
12292 #       i: 0
12293 #     }
12294 #   }
12295 #   attr {
12296 #     name: "shrink_axis_mask"
12297 #     type: "int"
12298 #     default_value {
12299 #       i: 0
12300 #     }
12301 #   }
12302 #   is_stateful: true
12303 # }
12304 # op {
12305 #   name: "Reverse"
12306 #   input_arg {
12307 #     name: "tensor"
12308 #     type_attr: "T"
12309 #   }
12310 #   input_arg {
12311 #     name: "dims"
12312 #     type: DT_BOOL
12313 #   }
12314 #   output_arg {
12315 #     name: "output"
12316 #     type_attr: "T"
12317 #   }
12318 #   attr {
12319 #     name: "T"
12320 #     type: "type"
12321 #     allowed_values {
12322 #       list {
12323 #         type: DT_UINT8
12324 #         type: DT_INT8
12325 #         type: DT_UINT16
12326 #         type: DT_INT16
12327 #         type: DT_INT32
12328 #         type: DT_INT64
12329 #         type: DT_BOOL
12330 #         type: DT_HALF
12331 #         type: DT_FLOAT
12332 #         type: DT_DOUBLE
12333 #         type: DT_COMPLEX64
12334 #         type: DT_COMPLEX128
12335 #         type: DT_STRING
12336 #       }
12337 #     }
12338 #   }
12339 # }
12340 # op {
12341 #   name: "ReverseSequence"
12342 #   input_arg {
12343 #     name: "input"
12344 #     type_attr: "T"
12345 #   }
12346 #   input_arg {
12347 #     name: "seq_lengths"
12348 #     type_attr: "Tlen"
12349 #   }
12350 #   output_arg {
12351 #     name: "output"
12352 #     type_attr: "T"
12353 #   }
12354 #   attr {
12355 #     name: "seq_dim"
12356 #     type: "int"
12357 #   }
12358 #   attr {
12359 #     name: "batch_dim"
12360 #     type: "int"
12361 #     default_value {
12362 #       i: 0
12363 #     }
12364 #   }
12365 #   attr {
12366 #     name: "T"
12367 #     type: "type"
12368 #   }
12369 #   attr {
12370 #     name: "Tlen"
12371 #     type: "type"
12372 #     default_value {
12373 #       type: DT_INT64
12374 #     }
12375 #     allowed_values {
12376 #       list {
12377 #         type: DT_INT32
12378 #         type: DT_INT64
12379 #       }
12380 #     }
12381 #   }
12382 # }
12383 # op {
12384 #   name: "ReverseV2"
12385 #   input_arg {
12386 #     name: "tensor"
12387 #     type_attr: "T"
12388 #   }
12389 #   input_arg {
12390 #     name: "axis"
12391 #     type_attr: "Tidx"
12392 #   }
12393 #   output_arg {
12394 #     name: "output"
12395 #     type_attr: "T"
12396 #   }
12397 #   attr {
12398 #     name: "Tidx"
12399 #     type: "type"
12400 #     default_value {
12401 #       type: DT_INT32
12402 #     }
12403 #     allowed_values {
12404 #       list {
12405 #         type: DT_INT32
12406 #         type: DT_INT64
12407 #       }
12408 #     }
12409 #   }
12410 #   attr {
12411 #     name: "T"
12412 #     type: "type"
12413 #     allowed_values {
12414 #       list {
12415 #         type: DT_UINT8
12416 #         type: DT_INT8
12417 #         type: DT_UINT16
12418 #         type: DT_INT16
12419 #         type: DT_INT32
12420 #         type: DT_INT64
12421 #         type: DT_BOOL
12422 #         type: DT_BFLOAT16
12423 #         type: DT_HALF
12424 #         type: DT_FLOAT
12425 #         type: DT_DOUBLE
12426 #         type: DT_COMPLEX64
12427 #         type: DT_COMPLEX128
12428 #         type: DT_STRING
12429 #       }
12430 #     }
12431 #   }
12432 # }
12433 # op {
12434 #   name: "ScatterNd"
12435 #   input_arg {
12436 #     name: "indices"
12437 #     type_attr: "Tindices"
12438 #   }
12439 #   input_arg {
12440 #     name: "updates"
12441 #     type_attr: "T"
12442 #   }
12443 #   input_arg {
12444 #     name: "shape"
12445 #     type_attr: "Tindices"
12446 #   }
12447 #   output_arg {
12448 #     name: "output"
12449 #     type_attr: "T"
12450 #   }
12451 #   attr {
12452 #     name: "T"
12453 #     type: "type"
12454 #   }
12455 #   attr {
12456 #     name: "Tindices"
12457 #     type: "type"
12458 #     allowed_values {
12459 #       list {
12460 #         type: DT_INT32
12461 #         type: DT_INT64
12462 #       }
12463 #     }
12464 #   }
12465 # }
12466 # op {
12467 #   name: "ScatterNdNonAliasingAdd"
12468 #   input_arg {
12469 #     name: "input"
12470 #     type_attr: "T"
12471 #   }
12472 #   input_arg {
12473 #     name: "indices"
12474 #     type_attr: "Tindices"
12475 #   }
12476 #   input_arg {
12477 #     name: "updates"
12478 #     type_attr: "T"
12479 #   }
12480 #   output_arg {
12481 #     name: "output"
12482 #     type_attr: "T"
12483 #   }
12484 #   attr {
12485 #     name: "T"
12486 #     type: "type"
12487 #     allowed_values {
12488 #       list {
12489 #         type: DT_FLOAT
12490 #         type: DT_DOUBLE
12491 #         type: DT_INT32
12492 #         type: DT_UINT8
12493 #         type: DT_INT16
12494 #         type: DT_INT8
12495 #         type: DT_COMPLEX64
12496 #         type: DT_INT64
12497 #         type: DT_QINT8
12498 #         type: DT_QUINT8
12499 #         type: DT_QINT32
12500 #         type: DT_BFLOAT16
12501 #         type: DT_UINT16
12502 #         type: DT_COMPLEX128
12503 #         type: DT_HALF
12504 #         type: DT_UINT32
12505 #         type: DT_UINT64
12506 #         type: DT_BOOL
12507 #       }
12508 #     }
12509 #   }
12510 #   attr {
12511 #     name: "Tindices"
12512 #     type: "type"
12513 #     allowed_values {
12514 #       list {
12515 #         type: DT_INT32
12516 #         type: DT_INT64
12517 #       }
12518 #     }
12519 #   }
12520 # }
12521 # op {
12522 #   name: "Shape"
12523 #   input_arg {
12524 #     name: "input"
12525 #     type_attr: "T"
12526 #   }
12527 #   output_arg {
12528 #     name: "output"
12529 #     type_attr: "out_type"
12530 #   }
12531 #   attr {
12532 #     name: "T"
12533 #     type: "type"
12534 #   }
12535 #   attr {
12536 #     name: "out_type"
12537 #     type: "type"
12538 #     default_value {
12539 #       type: DT_INT32
12540 #     }
12541 #     allowed_values {
12542 #       list {
12543 #         type: DT_INT32
12544 #         type: DT_INT64
12545 #       }
12546 #     }
12547 #   }
12548 # }
12549 # op {
12550 #   name: "ShapeN"
12551 #   input_arg {
12552 #     name: "input"
12553 #     type_attr: "T"
12554 #     number_attr: "N"
12555 #   }
12556 #   output_arg {
12557 #     name: "output"
12558 #     type_attr: "out_type"
12559 #     number_attr: "N"
12560 #   }
12561 #   attr {
12562 #     name: "N"
12563 #     type: "int"
12564 #     has_minimum: true
12565 #     minimum: 1
12566 #   }
12567 #   attr {
12568 #     name: "T"
12569 #     type: "type"
12570 #   }
12571 #   attr {
12572 #     name: "out_type"
12573 #     type: "type"
12574 #     default_value {
12575 #       type: DT_INT32
12576 #     }
12577 #     allowed_values {
12578 #       list {
12579 #         type: DT_INT32
12580 #         type: DT_INT64
12581 #       }
12582 #     }
12583 #   }
12584 # }
12585 # op {
12586 #   name: "Size"
12587 #   input_arg {
12588 #     name: "input"
12589 #     type_attr: "T"
12590 #   }
12591 #   output_arg {
12592 #     name: "output"
12593 #     type_attr: "out_type"
12594 #   }
12595 #   attr {
12596 #     name: "T"
12597 #     type: "type"
12598 #   }
12599 #   attr {
12600 #     name: "out_type"
12601 #     type: "type"
12602 #     default_value {
12603 #       type: DT_INT32
12604 #     }
12605 #     allowed_values {
12606 #       list {
12607 #         type: DT_INT32
12608 #         type: DT_INT64
12609 #       }
12610 #     }
12611 #   }
12612 # }
12613 # op {
12614 #   name: "Slice"
12615 #   input_arg {
12616 #     name: "input"
12617 #     type_attr: "T"
12618 #   }
12619 #   input_arg {
12620 #     name: "begin"
12621 #     type_attr: "Index"
12622 #   }
12623 #   input_arg {
12624 #     name: "size"
12625 #     type_attr: "Index"
12626 #   }
12627 #   output_arg {
12628 #     name: "output"
12629 #     type_attr: "T"
12630 #   }
12631 #   attr {
12632 #     name: "T"
12633 #     type: "type"
12634 #   }
12635 #   attr {
12636 #     name: "Index"
12637 #     type: "type"
12638 #     allowed_values {
12639 #       list {
12640 #         type: DT_INT32
12641 #         type: DT_INT64
12642 #       }
12643 #     }
12644 #   }
12645 # }
12646 # op {
12647 #   name: "Snapshot"
12648 #   input_arg {
12649 #     name: "input"
12650 #     type_attr: "T"
12651 #   }
12652 #   output_arg {
12653 #     name: "output"
12654 #     type_attr: "T"
12655 #   }
12656 #   attr {
12657 #     name: "T"
12658 #     type: "type"
12659 #   }
12660 # }
12661 # op {
12662 #   name: "SpaceToBatch"
12663 #   input_arg {
12664 #     name: "input"
12665 #     type_attr: "T"
12666 #   }
12667 #   input_arg {
12668 #     name: "paddings"
12669 #     type_attr: "Tpaddings"
12670 #   }
12671 #   output_arg {
12672 #     name: "output"
12673 #     type_attr: "T"
12674 #   }
12675 #   attr {
12676 #     name: "T"
12677 #     type: "type"
12678 #   }
12679 #   attr {
12680 #     name: "Tpaddings"
12681 #     type: "type"
12682 #     default_value {
12683 #       type: DT_INT32
12684 #     }
12685 #     allowed_values {
12686 #       list {
12687 #         type: DT_INT32
12688 #         type: DT_INT64
12689 #       }
12690 #     }
12691 #   }
12692 #   attr {
12693 #     name: "block_size"
12694 #     type: "int"
12695 #     has_minimum: true
12696 #     minimum: 2
12697 #   }
12698 # }
12699 # op {
12700 #   name: "SpaceToBatchND"
12701 #   input_arg {
12702 #     name: "input"
12703 #     type_attr: "T"
12704 #   }
12705 #   input_arg {
12706 #     name: "block_shape"
12707 #     type_attr: "Tblock_shape"
12708 #   }
12709 #   input_arg {
12710 #     name: "paddings"
12711 #     type_attr: "Tpaddings"
12712 #   }
12713 #   output_arg {
12714 #     name: "output"
12715 #     type_attr: "T"
12716 #   }
12717 #   attr {
12718 #     name: "T"
12719 #     type: "type"
12720 #   }
12721 #   attr {
12722 #     name: "Tblock_shape"
12723 #     type: "type"
12724 #     default_value {
12725 #       type: DT_INT32
12726 #     }
12727 #     allowed_values {
12728 #       list {
12729 #         type: DT_INT32
12730 #         type: DT_INT64
12731 #       }
12732 #     }
12733 #   }
12734 #   attr {
12735 #     name: "Tpaddings"
12736 #     type: "type"
12737 #     default_value {
12738 #       type: DT_INT32
12739 #     }
12740 #     allowed_values {
12741 #       list {
12742 #         type: DT_INT32
12743 #         type: DT_INT64
12744 #       }
12745 #     }
12746 #   }
12747 # }
12748 # op {
12749 #   name: "SpaceToDepth"
12750 #   input_arg {
12751 #     name: "input"
12752 #     type_attr: "T"
12753 #   }
12754 #   output_arg {
12755 #     name: "output"
12756 #     type_attr: "T"
12757 #   }
12758 #   attr {
12759 #     name: "T"
12760 #     type: "type"
12761 #   }
12762 #   attr {
12763 #     name: "block_size"
12764 #     type: "int"
12765 #     has_minimum: true
12766 #     minimum: 2
12767 #   }
12768 #   attr {
12769 #     name: "data_format"
12770 #     type: "string"
12771 #     default_value {
12772 #       s: "NHWC"
12773 #     }
12774 #     allowed_values {
12775 #       list {
12776 #         s: "NHWC"
12777 #         s: "NCHW"
12778 #         s: "NCHW_VECT_C"
12779 #       }
12780 #     }
12781 #   }
12782 # }
12783 # op {
12784 #   name: "Split"
12785 #   input_arg {
12786 #     name: "split_dim"
12787 #     type: DT_INT32
12788 #   }
12789 #   input_arg {
12790 #     name: "value"
12791 #     type_attr: "T"
12792 #   }
12793 #   output_arg {
12794 #     name: "output"
12795 #     type_attr: "T"
12796 #     number_attr: "num_split"
12797 #   }
12798 #   attr {
12799 #     name: "num_split"
12800 #     type: "int"
12801 #     has_minimum: true
12802 #     minimum: 1
12803 #   }
12804 #   attr {
12805 #     name: "T"
12806 #     type: "type"
12807 #   }
12808 # }
12809 # op {
12810 #   name: "SplitV"
12811 #   input_arg {
12812 #     name: "value"
12813 #     type_attr: "T"
12814 #   }
12815 #   input_arg {
12816 #     name: "size_splits"
12817 #     type_attr: "Tlen"
12818 #   }
12819 #   input_arg {
12820 #     name: "split_dim"
12821 #     type: DT_INT32
12822 #   }
12823 #   output_arg {
12824 #     name: "output"
12825 #     type_attr: "T"
12826 #     number_attr: "num_split"
12827 #   }
12828 #   attr {
12829 #     name: "num_split"
12830 #     type: "int"
12831 #     has_minimum: true
12832 #     minimum: 1
12833 #   }
12834 #   attr {
12835 #     name: "T"
12836 #     type: "type"
12837 #   }
12838 #   attr {
12839 #     name: "Tlen"
12840 #     type: "type"
12841 #     default_value {
12842 #       type: DT_INT64
12843 #     }
12844 #     allowed_values {
12845 #       list {
12846 #         type: DT_INT32
12847 #         type: DT_INT64
12848 #       }
12849 #     }
12850 #   }
12851 # }
12852 # op {
12853 #   name: "Squeeze"
12854 #   input_arg {
12855 #     name: "input"
12856 #     type_attr: "T"
12857 #   }
12858 #   output_arg {
12859 #     name: "output"
12860 #     type_attr: "T"
12861 #   }
12862 #   attr {
12863 #     name: "T"
12864 #     type: "type"
12865 #   }
12866 #   attr {
12867 #     name: "squeeze_dims"
12868 #     type: "list(int)"
12869 #     default_value {
12870 #       list {
12871 #       }
12872 #     }
12873 #     has_minimum: true
12874 #   }
12875 # }
12876 # op {
12877 #   name: "StopGradient"
12878 #   input_arg {
12879 #     name: "input"
12880 #     type_attr: "T"
12881 #   }
12882 #   output_arg {
12883 #     name: "output"
12884 #     type_attr: "T"
12885 #   }
12886 #   attr {
12887 #     name: "T"
12888 #     type: "type"
12889 #   }
12890 # }
12891 # op {
12892 #   name: "StridedSlice"
12893 #   input_arg {
12894 #     name: "input"
12895 #     type_attr: "T"
12896 #   }
12897 #   input_arg {
12898 #     name: "begin"
12899 #     type_attr: "Index"
12900 #   }
12901 #   input_arg {
12902 #     name: "end"
12903 #     type_attr: "Index"
12904 #   }
12905 #   input_arg {
12906 #     name: "strides"
12907 #     type_attr: "Index"
12908 #   }
12909 #   output_arg {
12910 #     name: "output"
12911 #     type_attr: "T"
12912 #   }
12913 #   attr {
12914 #     name: "T"
12915 #     type: "type"
12916 #   }
12917 #   attr {
12918 #     name: "Index"
12919 #     type: "type"
12920 #     allowed_values {
12921 #       list {
12922 #         type: DT_INT32
12923 #         type: DT_INT64
12924 #       }
12925 #     }
12926 #   }
12927 #   attr {
12928 #     name: "begin_mask"
12929 #     type: "int"
12930 #     default_value {
12931 #       i: 0
12932 #     }
12933 #   }
12934 #   attr {
12935 #     name: "end_mask"
12936 #     type: "int"
12937 #     default_value {
12938 #       i: 0
12939 #     }
12940 #   }
12941 #   attr {
12942 #     name: "ellipsis_mask"
12943 #     type: "int"
12944 #     default_value {
12945 #       i: 0
12946 #     }
12947 #   }
12948 #   attr {
12949 #     name: "new_axis_mask"
12950 #     type: "int"
12951 #     default_value {
12952 #       i: 0
12953 #     }
12954 #   }
12955 #   attr {
12956 #     name: "shrink_axis_mask"
12957 #     type: "int"
12958 #     default_value {
12959 #       i: 0
12960 #     }
12961 #   }
12962 # }
12963 # op {
12964 #   name: "StridedSliceAssign"
12965 #   input_arg {
12966 #     name: "ref"
12967 #     type_attr: "T"
12968 #     is_ref: true
12969 #   }
12970 #   input_arg {
12971 #     name: "begin"
12972 #     type_attr: "Index"
12973 #   }
12974 #   input_arg {
12975 #     name: "end"
12976 #     type_attr: "Index"
12977 #   }
12978 #   input_arg {
12979 #     name: "strides"
12980 #     type_attr: "Index"
12981 #   }
12982 #   input_arg {
12983 #     name: "value"
12984 #     type_attr: "T"
12985 #   }
12986 #   output_arg {
12987 #     name: "output_ref"
12988 #     type_attr: "T"
12989 #     is_ref: true
12990 #   }
12991 #   attr {
12992 #     name: "T"
12993 #     type: "type"
12994 #   }
12995 #   attr {
12996 #     name: "Index"
12997 #     type: "type"
12998 #     allowed_values {
12999 #       list {
13000 #         type: DT_INT32
13001 #         type: DT_INT64
13002 #       }
13003 #     }
13004 #   }
13005 #   attr {
13006 #     name: "begin_mask"
13007 #     type: "int"
13008 #     default_value {
13009 #       i: 0
13010 #     }
13011 #   }
13012 #   attr {
13013 #     name: "end_mask"
13014 #     type: "int"
13015 #     default_value {
13016 #       i: 0
13017 #     }
13018 #   }
13019 #   attr {
13020 #     name: "ellipsis_mask"
13021 #     type: "int"
13022 #     default_value {
13023 #       i: 0
13024 #     }
13025 #   }
13026 #   attr {
13027 #     name: "new_axis_mask"
13028 #     type: "int"
13029 #     default_value {
13030 #       i: 0
13031 #     }
13032 #   }
13033 #   attr {
13034 #     name: "shrink_axis_mask"
13035 #     type: "int"
13036 #     default_value {
13037 #       i: 0
13038 #     }
13039 #   }
13040 # }
13041 # op {
13042 #   name: "StridedSliceGrad"
13043 #   input_arg {
13044 #     name: "shape"
13045 #     type_attr: "Index"
13046 #   }
13047 #   input_arg {
13048 #     name: "begin"
13049 #     type_attr: "Index"
13050 #   }
13051 #   input_arg {
13052 #     name: "end"
13053 #     type_attr: "Index"
13054 #   }
13055 #   input_arg {
13056 #     name: "strides"
13057 #     type_attr: "Index"
13058 #   }
13059 #   input_arg {
13060 #     name: "dy"
13061 #     type_attr: "T"
13062 #   }
13063 #   output_arg {
13064 #     name: "output"
13065 #     type_attr: "T"
13066 #   }
13067 #   attr {
13068 #     name: "T"
13069 #     type: "type"
13070 #   }
13071 #   attr {
13072 #     name: "Index"
13073 #     type: "type"
13074 #     allowed_values {
13075 #       list {
13076 #         type: DT_INT32
13077 #         type: DT_INT64
13078 #       }
13079 #     }
13080 #   }
13081 #   attr {
13082 #     name: "begin_mask"
13083 #     type: "int"
13084 #     default_value {
13085 #       i: 0
13086 #     }
13087 #   }
13088 #   attr {
13089 #     name: "end_mask"
13090 #     type: "int"
13091 #     default_value {
13092 #       i: 0
13093 #     }
13094 #   }
13095 #   attr {
13096 #     name: "ellipsis_mask"
13097 #     type: "int"
13098 #     default_value {
13099 #       i: 0
13100 #     }
13101 #   }
13102 #   attr {
13103 #     name: "new_axis_mask"
13104 #     type: "int"
13105 #     default_value {
13106 #       i: 0
13107 #     }
13108 #   }
13109 #   attr {
13110 #     name: "shrink_axis_mask"
13111 #     type: "int"
13112 #     default_value {
13113 #       i: 0
13114 #     }
13115 #   }
13116 # }
13117 # op {
13118 #   name: "Tile"
13119 #   input_arg {
13120 #     name: "input"
13121 #     type_attr: "T"
13122 #   }
13123 #   input_arg {
13124 #     name: "multiples"
13125 #     type_attr: "Tmultiples"
13126 #   }
13127 #   output_arg {
13128 #     name: "output"
13129 #     type_attr: "T"
13130 #   }
13131 #   attr {
13132 #     name: "T"
13133 #     type: "type"
13134 #   }
13135 #   attr {
13136 #     name: "Tmultiples"
13137 #     type: "type"
13138 #     default_value {
13139 #       type: DT_INT32
13140 #     }
13141 #     allowed_values {
13142 #       list {
13143 #         type: DT_INT32
13144 #         type: DT_INT64
13145 #       }
13146 #     }
13147 #   }
13148 # }
13149 # op {
13150 #   name: "TileGrad"
13151 #   input_arg {
13152 #     name: "input"
13153 #     type_attr: "T"
13154 #   }
13155 #   input_arg {
13156 #     name: "multiples"
13157 #     type: DT_INT32
13158 #   }
13159 #   output_arg {
13160 #     name: "output"
13161 #     type_attr: "T"
13162 #   }
13163 #   attr {
13164 #     name: "T"
13165 #     type: "type"
13166 #   }
13167 #   deprecation {
13168 #     version: 3
13169 #     explanation: "TileGrad has been replaced with reduce_sum"
13170 #   }
13171 # }
13172 # op {
13173 #   name: "Transpose"
13174 #   input_arg {
13175 #     name: "x"
13176 #     type_attr: "T"
13177 #   }
13178 #   input_arg {
13179 #     name: "perm"
13180 #     type_attr: "Tperm"
13181 #   }
13182 #   output_arg {
13183 #     name: "y"
13184 #     type_attr: "T"
13185 #   }
13186 #   attr {
13187 #     name: "T"
13188 #     type: "type"
13189 #   }
13190 #   attr {
13191 #     name: "Tperm"
13192 #     type: "type"
13193 #     default_value {
13194 #       type: DT_INT32
13195 #     }
13196 #     allowed_values {
13197 #       list {
13198 #         type: DT_INT32
13199 #         type: DT_INT64
13200 #       }
13201 #     }
13202 #   }
13203 # }
13204 # op {
13205 #   name: "Unique"
13206 #   input_arg {
13207 #     name: "x"
13208 #     type_attr: "T"
13209 #   }
13210 #   output_arg {
13211 #     name: "y"
13212 #     type_attr: "T"
13213 #   }
13214 #   output_arg {
13215 #     name: "idx"
13216 #     type_attr: "out_idx"
13217 #   }
13218 #   attr {
13219 #     name: "T"
13220 #     type: "type"
13221 #   }
13222 #   attr {
13223 #     name: "out_idx"
13224 #     type: "type"
13225 #     default_value {
13226 #       type: DT_INT32
13227 #     }
13228 #     allowed_values {
13229 #       list {
13230 #         type: DT_INT32
13231 #         type: DT_INT64
13232 #       }
13233 #     }
13234 #   }
13235 # }
13236 # op {
13237 #   name: "UniqueV2"
13238 #   input_arg {
13239 #     name: "x"
13240 #     type_attr: "T"
13241 #   }
13242 #   input_arg {
13243 #     name: "axis"
13244 #     type_attr: "Taxis"
13245 #   }
13246 #   output_arg {
13247 #     name: "y"
13248 #     type_attr: "T"
13249 #   }
13250 #   output_arg {
13251 #     name: "idx"
13252 #     type_attr: "out_idx"
13253 #   }
13254 #   attr {
13255 #     name: "T"
13256 #     type: "type"
13257 #   }
13258 #   attr {
13259 #     name: "Taxis"
13260 #     type: "type"
13261 #     default_value {
13262 #       type: DT_INT64
13263 #     }
13264 #     allowed_values {
13265 #       list {
13266 #         type: DT_INT32
13267 #         type: DT_INT64
13268 #       }
13269 #     }
13270 #   }
13271 #   attr {
13272 #     name: "out_idx"
13273 #     type: "type"
13274 #     default_value {
13275 #       type: DT_INT32
13276 #     }
13277 #     allowed_values {
13278 #       list {
13279 #         type: DT_INT32
13280 #         type: DT_INT64
13281 #       }
13282 #     }
13283 #   }
13284 # }
13285 # op {
13286 #   name: "UniqueWithCounts"
13287 #   input_arg {
13288 #     name: "x"
13289 #     type_attr: "T"
13290 #   }
13291 #   output_arg {
13292 #     name: "y"
13293 #     type_attr: "T"
13294 #   }
13295 #   output_arg {
13296 #     name: "idx"
13297 #     type_attr: "out_idx"
13298 #   }
13299 #   output_arg {
13300 #     name: "count"
13301 #     type_attr: "out_idx"
13302 #   }
13303 #   attr {
13304 #     name: "T"
13305 #     type: "type"
13306 #   }
13307 #   attr {
13308 #     name: "out_idx"
13309 #     type: "type"
13310 #     default_value {
13311 #       type: DT_INT32
13312 #     }
13313 #     allowed_values {
13314 #       list {
13315 #         type: DT_INT32
13316 #         type: DT_INT64
13317 #       }
13318 #     }
13319 #   }
13320 # }
13321 # op {
13322 #   name: "UniqueWithCountsV2"
13323 #   input_arg {
13324 #     name: "x"
13325 #     type_attr: "T"
13326 #   }
13327 #   input_arg {
13328 #     name: "axis"
13329 #     type_attr: "Taxis"
13330 #   }
13331 #   output_arg {
13332 #     name: "y"
13333 #     type_attr: "T"
13334 #   }
13335 #   output_arg {
13336 #     name: "idx"
13337 #     type_attr: "out_idx"
13338 #   }
13339 #   output_arg {
13340 #     name: "count"
13341 #     type_attr: "out_idx"
13342 #   }
13343 #   attr {
13344 #     name: "T"
13345 #     type: "type"
13346 #   }
13347 #   attr {
13348 #     name: "Taxis"
13349 #     type: "type"
13350 #     default_value {
13351 #       type: DT_INT64
13352 #     }
13353 #     allowed_values {
13354 #       list {
13355 #         type: DT_INT32
13356 #         type: DT_INT64
13357 #       }
13358 #     }
13359 #   }
13360 #   attr {
13361 #     name: "out_idx"
13362 #     type: "type"
13363 #     default_value {
13364 #       type: DT_INT32
13365 #     }
13366 #     allowed_values {
13367 #       list {
13368 #         type: DT_INT32
13369 #         type: DT_INT64
13370 #       }
13371 #     }
13372 #   }
13373 # }
13374 # op {
13375 #   name: "Unpack"
13376 #   input_arg {
13377 #     name: "value"
13378 #     type_attr: "T"
13379 #   }
13380 #   output_arg {
13381 #     name: "output"
13382 #     type_attr: "T"
13383 #     number_attr: "num"
13384 #   }
13385 #   attr {
13386 #     name: "num"
13387 #     type: "int"
13388 #     has_minimum: true
13389 #   }
13390 #   attr {
13391 #     name: "T"
13392 #     type: "type"
13393 #   }
13394 #   attr {
13395 #     name: "axis"
13396 #     type: "int"
13397 #     default_value {
13398 #       i: 0
13399 #     }
13400 #   }
13401 # }
13402 # op {
13403 #   name: "UnravelIndex"
13404 #   input_arg {
13405 #     name: "indices"
13406 #     type_attr: "Tidx"
13407 #   }
13408 #   input_arg {
13409 #     name: "dims"
13410 #     type_attr: "Tidx"
13411 #   }
13412 #   output_arg {
13413 #     name: "output"
13414 #     type_attr: "Tidx"
13415 #   }
13416 #   attr {
13417 #     name: "Tidx"
13418 #     type: "type"
13419 #     default_value {
13420 #       type: DT_INT32
13421 #     }
13422 #     allowed_values {
13423 #       list {
13424 #         type: DT_INT32
13425 #         type: DT_INT64
13426 #       }
13427 #     }
13428 #   }
13429 # }
13430 # op {
13431 #   name: "UpperBound"
13432 #   input_arg {
13433 #     name: "sorted_inputs"
13434 #     type_attr: "T"
13435 #   }
13436 #   input_arg {
13437 #     name: "values"
13438 #     type_attr: "T"
13439 #   }
13440 #   output_arg {
13441 #     name: "output"
13442 #     type_attr: "out_type"
13443 #   }
13444 #   attr {
13445 #     name: "T"
13446 #     type: "type"
13447 #   }
13448 #   attr {
13449 #     name: "out_type"
13450 #     type: "type"
13451 #     default_value {
13452 #       type: DT_INT32
13453 #     }
13454 #     allowed_values {
13455 #       list {
13456 #         type: DT_INT32
13457 #         type: DT_INT64
13458 #       }
13459 #     }
13460 #   }
13461 # }
13462 # op {
13463 #   name: "Where"
13464 #   input_arg {
13465 #     name: "input"
13466 #     type_attr: "T"
13467 #   }
13468 #   output_arg {
13469 #     name: "index"
13470 #     type: DT_INT64
13471 #   }
13472 #   attr {
13473 #     name: "T"
13474 #     type: "type"
13475 #     default_value {
13476 #       type: DT_BOOL
13477 #     }
13478 #     allowed_values {
13479 #       list {
13480 #         type: DT_FLOAT
13481 #         type: DT_DOUBLE
13482 #         type: DT_INT32
13483 #         type: DT_UINT8
13484 #         type: DT_INT16
13485 #         type: DT_INT8
13486 #         type: DT_COMPLEX64
13487 #         type: DT_INT64
13488 #         type: DT_QINT8
13489 #         type: DT_QUINT8
13490 #         type: DT_QINT32
13491 #         type: DT_BFLOAT16
13492 #         type: DT_UINT16
13493 #         type: DT_COMPLEX128
13494 #         type: DT_HALF
13495 #         type: DT_UINT32
13496 #         type: DT_UINT64
13497 #         type: DT_BOOL
13498 #       }
13499 #     }
13500 #   }
13501 # }
13502 # op {
13503 #   name: "ZerosLike"
13504 #   input_arg {
13505 #     name: "x"
13506 #     type_attr: "T"
13507 #   }
13508 #   output_arg {
13509 #     name: "y"
13510 #     type_attr: "T"
13511 #   }
13512 #   attr {
13513 #     name: "T"
13514 #     type: "type"
13515 #   }
13516 # }
13517 _op_def_lib = _InitOpDefLibrary(b"\nm\n\023BatchMatrixBandPart\022\n\n\005input\"\001T\022\r\n\tnum_lower\030\t\022\r\n\tnum_upper\030\t\032\t\n\004band\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixBandPart\nL\n\017BatchMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\022\010\016\022\016Use MatrixDiag\nS\n\023BatchMatrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixDiagPart\n^\n\022BatchMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\025\010\016\022\021Use MatrixSetDiag\nr\n\014BatchToSpace\022\n\n\005input\"\001T\022\r\n\005crops\"\004Tidx\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\240\001\n\016BatchToSpaceND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\017\n\005crops\"\006Tcrops\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" 
\n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\032\n\006Tcrops\022\004type\032\0020\003:\006\n\0042\002\003\t\np\n\007Bitcast\022\n\n\005input\"\001T\032\016\n\006output\"\004type\"\"\n\001T\022\004type:\027\n\0252\023\016\023\001\002\t\003\004\021\026\027\006\005\010\022\013\014\017\020\r\"%\n\004type\022\004type:\027\n\0252\023\016\023\001\002\t\003\004\021\026\027\006\005\010\022\013\014\017\020\r\nA\n\rBroadcastArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nR\n\025BroadcastGradientArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\032\007\n\002r1\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nZ\n\013BroadcastTo\022\n\n\005input\"\001T\022\r\n\005shape\"\004Tidx\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nQ\n\rCheckNumerics\022\013\n\006tensor\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\"\021\n\007message\022\006string\nN\n\006Concat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\nI\n\014ConcatOffset\022\016\n\nconcat_dim\030\003\022\014\n\005shape\030\003*\001N\032\r\n\006offset\030\003*\001N\"\014\n\001N\022\003int(\0010\002\nh\n\010ConcatV2\022\016\n\006values\"\001T*\001N\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nY\n\022ConjugateTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\n8\n\005Const\032\017\n\006output\"\005dtype\"\017\n\005value\022\006tensor\"\r\n\005dtype\022\004type\n>\n\025DebugGradientIdentity\022\n\n\005input\"\001T\032\013\n\006outp
ut\"\001T\"\t\n\001T\022\004type\230\001\001\nG\n\030DebugGradientRefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n(\n\010DeepCopy\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\210\001\001\n\205\001\n\014DepthToSpace\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\235\001\n\nDequantize\022\n\n\005input\"\001T\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\n\n\006output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\n;\n\004Diag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n>\n\010DiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n\271\001\n\014EditDistance\022\026\n\022hypothesis_indices\030\t\022\026\n\021hypothesis_values\"\001T\022\024\n\020hypothesis_shape\030\t\022\021\n\rtruth_indices\030\t\022\021\n\014truth_values\"\001T\022\017\n\013truth_shape\030\t\032\n\n\006output\030\001\"\025\n\tnormalize\022\004bool\032\002(\001\"\t\n\001T\022\004type\nG\n\005Empty\022\t\n\005shape\030\003\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\020\n\004init\022\004bool\032\002(\000\210\001\001\nA\n\013EnsureShape\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\016\n\005shape\022\005shape\"\t\n\001T\022\004type\nW\n\nExpandDims\022\n\n\005input\"\001T\022\013\n\003dim\"\004Tdim\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\004Tdim\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\023ExtractImagePatches\022\013\n\006images\"\001T\032\014\n\007patches\"\001T\"\027\n\006ksizes\022\tli
st(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\244\001\n\024ExtractVolumePatches\022\n\n\005input\"\001T\032\014\n\007patches\"\001T\"\027\n\006ksizes\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\213\001\n\027FakeQuantWithMinMaxArgs\022\n\n\006inputs\030\001\032\013\n\007outputs\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\244\001\n\037FakeQuantWithMinMaxArgsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\032\r\n\tbackprops\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\ns\n\027FakeQuantWithMinMaxVars\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\302\001\n\037FakeQuantWithMinMaxVarsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n}\n!FakeQuantWithMinMaxVarsPerChannel\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014
narrow_range\022\004bool\032\002(\000\n\314\001\n)FakeQuantWithMinMaxVarsPerChannelGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n^\n\004Fill\022\022\n\004dims\"\nindex_type\022\n\n\005value\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\036\n\nindex_type\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\006Gather\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\034\n\020validate_indices\022\004bool\032\002(\001\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\np\n\010GatherNd\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\226\001\n\010GatherV2\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\022\r\n\004axis\"\005Taxis\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\025\n\005Taxis\022\004type:\006\n\0042\002\003\t\n7\n\016GuaranteeConst\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\210\001\001\n.\n\010Identity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n9\n\tIdentityN\022\n\n\005input2\001T\032\013\n\006output2\001T\"\023\n\001T\022\nlist(type)(\0010\001\n^\n\016ImmutableConst\032\017\n\006tensor\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\"\034\n\022memory_region_name\022\006string\n6\n\nInplaceAdd\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n6\n\nInplaceSub\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\
006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n9\n\rInplaceUpdate\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n:\n\021InvertPermutation\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\n\\\n\010ListDiff\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\010\n\003out\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nj\n\nLowerBound\022\022\n\rsorted_inputs\"\001T\022\013\n\006values\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nx\n\016MatrixBandPart\022\n\n\005input\"\001T\022\023\n\tnum_lower\"\006Tindex\022\023\n\tnum_upper\"\006Tindex\032\t\n\004band\"\001T\"\t\n\001T\022\004type\"\032\n\006Tindex\022\004type\032\0020\t:\006\n\0042\002\003\t\n3\n\nMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n6\n\016MatrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004type\nB\n\rMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\215\001\n\tMirrorPad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\221\001\n\rMirrorPadGrad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\214\001\n\006OneHot\022\r\n\007indices\"\002TI\022\t\n\005depth\030\003\022\r\n\010on_value\"\001T\022\016\n\toff_value\"\001T\032\013\n\006output\"\001T\"\030\n\004axis\022\003int\032\013\030\377\3
77\377\377\377\377\377\377\377\001\"\t\n\001T\022\004type\"\027\n\002TI\022\004type\032\0020\t:\007\n\0052\003\004\003\t\n8\n\010OnesLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\016\023\001\002\006\004\005\021\003\t\010\022\n\nM\n\004Pack\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\n_\n\003Pad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nw\n\005PadV2\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\022\024\n\017constant_values\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nV\n\016ParallelConcat\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\016\n\005shape\022\005shape\nC\n\013Placeholder\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\024\n\005shape\022\005shape\032\004:\002\030\001\nw\n\rPlaceholderV2\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shapeB6\010\027\0222Placeholder now behaves the same as 
PlaceholderV2.\nX\n\026PlaceholderWithDefault\022\016\n\005input\"\005dtype\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\nL\n\017PreventGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\007message\022\006string\032\002\022\000\n\354\001\n\025QuantizeAndDequantize\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\031\n\tinput_min\022\005float\032\005%\000\000\000\000\"\031\n\tinput_max\022\005float\032\005%\000\000\000\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002B\'\010\026\022#Replaced by QuantizeAndDequantizeV2\n\257\001\n\027QuantizeAndDequantizeV2\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n\250\001\n\027QuantizeAndDequantizeV3\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\022\014\n\010num_bits\030\003\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\027\n\013range_given\022\004bool\032\002(\001\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n\221\002\n\nQuantizeV2\022\t\n\005input\030\001\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\"R\n\nround_mode\022\006string\032\025\022\023HALF_AWAY_FROM_ZERO:%\n#\022\023HALF_AWAY_FROM_ZERO\022\014HALF_TO_EVEN\n\236\001\n\017QuantizedConcat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\0
01T*\001N\022\021\n\ninput_mins\030\001*\001N\022\022\n\013input_maxes\030\001*\001N\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\n\205\002\n\025QuantizedInstanceNorm\022\006\n\001x\"\001T\022\t\n\005x_min\030\001\022\t\n\005x_max\030\001\032\006\n\001y\"\001T\032\t\n\005y_min\030\001\032\t\n\005y_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\022output_range_given\022\004bool\032\002(\000\"\033\n\013given_y_min\022\005float\032\005%\000\000\000\000\"\033\n\013given_y_max\022\005float\032\005%\000\000\000\000\" \n\020variance_epsilon\022\005float\032\005%\254\305\'7\"\036\n\016min_separation\022\005float\032\005%o\022\203:\n\242\001\n\020QuantizedReshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n)\n\004Rank\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\t\n\001T\022\004type\n:\n\013RefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n[\n\007Reshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\002\n\032ResourceStridedSliceAssign\022\007\n\003ref\030\024\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\210\001\001\
nK\n\007Reverse\022\013\n\006tensor\"\001T\022\010\n\004dims\030\n\032\013\n\006output\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\004\006\021\005\003\t\n\023\001\002\010\022\007\n\212\001\n\017ReverseSequence\022\n\n\005input\"\001T\022\023\n\013seq_lengths\"\004Tlen\032\013\n\006output\"\001T\"\016\n\007seq_dim\022\003int\"\024\n\tbatch_dim\022\003int\032\002\030\000\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nl\n\tReverseV2\022\013\n\006tensor\"\001T\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\001T\022\004type:\022\n\0202\016\004\006\021\005\003\t\n\016\023\001\002\010\022\007\ns\n\tScatterNd\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\022\021\n\005shape\"\010Tindices\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\222\001\n\027ScatterNdNonAliasingAdd\022\n\n\005input\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nP\n\005Shape\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\ne\n\006ShapeN\022\r\n\005input\"\001T*\001N\032\025\n\006output\"\010out_type*\001N\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nO\n\004Size\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\na\n\005Slice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\r\n\004size\"\005Index\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\n.\n\010Snapshot\022\n\n\005input
\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\177\n\014SpaceToBatch\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"\025\n\nblock_size\022\003int(\0010\002\n\251\001\n\016SpaceToBatchND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" \n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\n\205\001\n\014SpaceToDepth\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n[\n\005Split\022\r\n\tsplit_dim\030\003\022\n\n\005value\"\001T\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\n\213\001\n\006SplitV\022\n\n\005value\"\001T\022\023\n\013size_splits\"\004Tlen\022\r\n\tsplit_dim\030\003\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nN\n\007Squeeze\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\037\n\014squeeze_dims\022\tlist(int)\032\002\n\000(\001\n2\n\014StopGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\366\001\n\014StridedSlice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink
_axis_mask\022\003int\032\002\030\000\n\220\002\n\022StridedSliceAssign\022\013\n\003ref\"\001T\200\001\001\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\032\022\n\noutput_ref\"\001T\200\001\001\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\n\207\002\n\020StridedSliceGrad\022\016\n\005shape\"\005Index\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\007\n\002dy\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\nc\n\004Tile\022\n\n\005input\"\001T\022\027\n\tmultiples\"\nTmultiples\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\036\n\nTmultiples\022\004type\032\0020\003:\006\n\0042\002\003\t\nm\n\010TileGrad\022\n\n\005input\"\001T\022\r\n\tmultiples\030\003\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB.\010\003\022*TileGrad has been replaced with 
reduce_sum\nP\n\tTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unique\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n|\n\010UniqueV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nl\n\020UniqueWithCounts\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\230\001\n\022UniqueWithCountsV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unpack\022\n\n\005value\"\001T\032\020\n\006output\"\001T*\003num\"\014\n\003num\022\003int(\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\nW\n\014UnravelIndex\022\017\n\007indices\"\004Tidx\022\014\n\004dims\"\004Tidx\032\016\n\006output\"\004Tidx\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nj\n\nUpperBound\022\022\n\rsorted_inputs\"\001T\022\013\n\006values\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nE\n\005Where\022\n\n\005input\"\001T\032\t\n\005index\030\t\"%\n\001T\022\004type\032\0020\n:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\n&\n\tZerosLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type")